diff --git a/.dockerignore b/.dockerignore
index a5d8f7237b2..e660fd93d31 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,2 +1 @@
bin/
-dist/
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000000..209b8217f30
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+# global rules
+* @docker/compose-maintainers
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 873154a9e2e..00000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-**Description**
-
-
-
-**Steps to reproduce the issue:**
-1.
-2.
-3.
-
-**Describe the results you received:**
-
-
-**Describe the results you expected:**
-
-
-**Additional information you deem important (e.g. issue happens only occasionally):**
-
-**Output of `docker compose version`:**
-
-```
-(paste your output here)
-```
-
-**Output of `docker info`:**
-
-```
-(paste your output here)
-```
-
-**Additional environment details:**
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 00000000000..37b546967dd
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,55 @@
+name: 🐞 Bug
+description: File a bug/issue
+title: "[BUG]
"
+labels: ['status/0-triage', 'kind/bug']
+body:
+ - type: textarea
+ attributes:
+ label: Description
+ description: |
+ Briefly describe the problem you are having.
+
+ Include both the current behavior (what you are seeing) as well as what you expected to happen.
+ validations:
+ required: true
+ - type: markdown
+ attributes:
+ value: |
+ [Docker Swarm](https://www.mirantis.com/software/swarm/) uses a distinct compose file parser and
+ as such doesn't support some of the recent features of Docker Compose. Please contact Mirantis
+ if you need assistance with compose file support in Docker Swarm.
+ - type: textarea
+ attributes:
+ label: Steps To Reproduce
+ description: Steps to reproduce the behavior.
+ placeholder: |
+ 1. In this environment...
+ 2. With this config...
+ 3. Run '...'
+ 4. See error...
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Compose Version
+ description: |
+ Paste output of `docker compose version` and `docker-compose version`.
+ render: Text
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Docker Environment
+ description: Paste output of `docker info`.
+ render: Text
+ validations:
+ required: false
+ - type: textarea
+ attributes:
+ label: Anything else?
+ description: |
+ Links? References? Anything that will give us more context about the issue you are encountering!
+
+ Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000000..cc4b65bf24d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,11 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Docker Community Slack
+ url: https://dockr.ly/slack
+ about: 'Use the #docker-compose channel'
+ - name: Docker Support Forums
+ url: https://forums.docker.com/c/open-source-projects/compose/15
+ about: 'Use the "Open Source Projects > Compose" category'
+ - name: 'Ask on Stack Overflow'
+ url: https://stackoverflow.com/questions/tagged/docker-compose
+ about: 'Use the [docker-compose] tag when creating new questions'
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml
new file mode 100644
index 00000000000..677a1684fc0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yaml
@@ -0,0 +1,13 @@
+name: Feature request
+description: Missing functionality? Come tell us about it!
+labels:
+ - kind/feature
+ - status/0-triage
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: What is the feature you want to see?
+ validations:
+ required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 5fa9759fe57..00e87ff8eaa 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,4 +3,4 @@
**Related issue**
-**(not mandatory) A picture of a cute animal, if possible in relation with what you did**
+**(not mandatory) A picture of a cute animal, if possible in relation to what you did**
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
new file mode 100644
index 00000000000..88f7528d524
--- /dev/null
+++ b/.github/SECURITY.md
@@ -0,0 +1,44 @@
+# Security Policy
+
+The maintainers of Docker Compose take security seriously. If you discover
+a security issue, please bring it to their attention right away!
+
+## Reporting a Vulnerability
+
+Please **DO NOT** file a public issue, instead send your report privately
+to [security@docker.com](mailto:security@docker.com).
+
+Reporter(s) can expect a response within 72 hours, acknowledging the issue was
+received.
+
+## Review Process
+
+After receiving the report, an initial triage and technical analysis is
+performed to confirm the report and determine its scope. We may request
+additional information in this stage of the process.
+
+Once a reviewer has confirmed the relevance of the report, a draft security
+advisory will be created on GitHub. The draft advisory will be used to discuss
+the issue with maintainers, the reporter(s), and where applicable, other
+affected parties under embargo.
+
+If the vulnerability is accepted, a timeline for developing a patch, public
+disclosure, and patch release will be determined. If there is an embargo period
+on public disclosure before the patch release, the reporter(s) are expected to
+participate in the discussion of the timeline and abide by agreed upon dates
+for public disclosure.
+
+## Accreditation
+
+Security reports are greatly appreciated and we will publicly thank you,
+although we will keep your name confidential if you request it. We also like to
+send gifts - if you're into swag, make sure to let us know. We do not currently
+offer a paid security bounty program.
+
+## Supported Versions
+
+This project does not provide long-term supported versions; only the current
+release and the `main` branch are actively maintained. Docker Compose v1 and the
+corresponding [v1 branch](https://github.com/docker/compose/tree/v1) have reached
+EOL and are no longer supported.
+
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 36b24f29dca..3810add71fc 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,3 +4,20 @@ updates:
directory: /
schedule:
interval: daily
+ ignore:
+ # docker + moby deps require coordination
+ - dependency-name: "github.com/docker/buildx"
+ # buildx is still 0.x
+ update-types: ["version-update:semver-minor"]
+ - dependency-name: "github.com/moby/buildkit"
+ # buildkit is still 0.x
+ update-types: [ "version-update:semver-minor" ]
+ - dependency-name: "github.com/docker/cli"
+ update-types: ["version-update:semver-major"]
+ - dependency-name: "github.com/docker/docker"
+ update-types: ["version-update:semver-major"]
+ - dependency-name: "github.com/containerd/containerd"
+ # containerd major/minor must be kept in sync with moby
+ update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
+ # OTEL dependencies should be upgraded in sync with engine, cli, buildkit and buildx projects
+ - dependency-name: "go.opentelemetry.io/*"
diff --git a/.github/stale.yml b/.github/stale.yml
index 3caf0737165..c14cb12918a 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -1,7 +1,7 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 180
+daysUntilStale: 90
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
@@ -12,7 +12,7 @@ onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- - "enhancement ✨"
+ - "kind/feature"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
diff --git a/.github/workflows/artifacts.yml b/.github/workflows/artifacts.yml
deleted file mode 100644
index 760189dacf5..00000000000
--- a/.github/workflows/artifacts.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: Publish Artifacts
-on:
- issue_comment:
- types: [created]
-jobs:
- publish-artifacts:
- if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/generate-artifacts')
- runs-on: ubuntu-latest
- steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
- with:
- go-version: 1.17
- id: go
-
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
-
- - uses: actions/cache@v2
- with:
- path: ~/go/pkg/mod
- key: go-${{ hashFiles('**/go.sum') }}
-
- - name: Build cross platform compose-plugin binaries
- run: make -f builder.Makefile cross
-
- - name: Upload macos-amd64 binary
- uses: actions/upload-artifact@v2
- with:
- name: docker-compose-darwin-amd64
- path: ${{ github.workspace }}/bin/docker-compose-darwin-amd64
-
- - name: Upload macos-arm64 binary
- uses: actions/upload-artifact@v2
- with:
- name: docker-compose-darwin-arm64
- path: ${{ github.workspace }}/bin/docker-compose-darwin-arm64
-
- - name: Upload linux-amd64 binary
- uses: actions/upload-artifact@v2
- with:
- name: docker-compose-linux-amd64
- path: ${{ github.workspace }}/bin/docker-compose-linux-amd64
-
- - name: Upload windows-amd64 binary
- uses: actions/upload-artifact@v2
- with:
- name: docker-compose-windows-amd64.exe
- path: ${{ github.workspace }}/bin/docker-compose-windows-amd64.exe
-
- - name: Update comment
- uses: peter-evans/create-or-update-comment@v1
- with:
- comment-id: ${{ github.event.comment.id }}
- body: |
- This PR can be tested using [binaries](https://github.com/docker/compose-cli/actions/runs/${{ github.run_id }}).
- reactions: eyes
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 57fe3dad592..3bc4bd18a54 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,129 +1,332 @@
-name: Continuous integration
+name: ci
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
on:
push:
branches:
- - v2
+ - 'main'
+ tags:
+ - 'v*'
pull_request:
+ workflow_dispatch:
+ inputs:
+ debug_enabled:
+ description: 'To run with tmate enter "debug_enabled"'
+ required: false
+ default: "false"
+
+permissions:
+ contents: read # to fetch code (actions/checkout)
jobs:
- lint:
- name: Lint
+ prepare:
runs-on: ubuntu-latest
- env:
- GO111MODULE: "on"
+ outputs:
+ matrix: ${{ steps.platforms.outputs.matrix }}
steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
- with:
- go-version: 1.17
- id: go
-
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Create matrix
+ id: platforms
+ run: |
+ echo matrix=$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms') >> $GITHUB_OUTPUT
+ -
+ name: Show matrix
+ run: |
+ echo ${{ steps.platforms.outputs.matrix }}
- - name: Validate go-mod, license headers and docs are up-to-date
- run: make validate
+ validate:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ target:
+ - lint
+ - validate-go-mod
+ - validate-headers
+ - validate-docs
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ -
+ name: Run
+ run: |
+ make ${{ matrix.target }}
- - name: Run golangci-lint
+ binary:
+ runs-on: ubuntu-latest
+ needs:
+ - prepare
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Prepare
+ run: |
+ platform=${MATRIX_PLATFORM}
+ echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
env:
- BUILD_TAGS: e2e
- uses: golangci/golangci-lint-action@v2
+ MATRIX_PLATFORM: ${{ matrix.platform }}
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ -
+ name: Build
+ uses: docker/bake-action@v6
with:
- args: --timeout=180s
+ source: .
+ targets: release
+ provenance: mode=max
+ sbom: true
+ set: |
+ *.platform=${{ matrix.platform }}
+ *.cache-from=type=gha,scope=binary-${{ env.PLATFORM_PAIR }}
+ *.cache-to=type=gha,scope=binary-${{ env.PLATFORM_PAIR }},mode=max
+ -
+ name: Rename provenance and sbom
+ working-directory: ./bin/release
+ run: |
+ binname=$(find . -name 'docker-compose-*')
+ filename=$(basename "$binname" | sed -E 's/\.exe$//')
+ mv "provenance.json" "${filename}.provenance.json"
+ mv "sbom-binary.spdx.json" "${filename}.sbom.json"
+ find . -name 'sbom*.json' -exec rm {} \;
+ -
+ name: List artifacts
+ run: |
+ tree -nh ./bin/release
+ -
+ name: Upload artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: compose-${{ env.PLATFORM_PAIR }}
+ path: ./bin/release
+ if-no-files-found: error
- # only on main branch, costs too much for the gain on every PR
- validate-cross-build:
- name: Validate cross build
+ test:
runs-on: ubuntu-latest
- if: github.ref == 'refs/heads/main'
- env:
- GO111MODULE: "on"
steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ -
+ name: Test
+ uses: docker/bake-action@v6
with:
- go-version: 1.17
- id: go
-
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
-
- - uses: actions/cache@v2
+ targets: test
+ set: |
+ *.cache-from=type=gha,scope=test
+ *.cache-to=type=gha,scope=test
+ -
+ name: Gather coverage data
+ uses: actions/upload-artifact@v4
with:
- path: ~/go/pkg/mod
- key: go-${{ hashFiles('**/go.sum') }}
-
- # Ensure we don't discover cross platform build issues at release time.
- # Time used to build linux here is gained back in the build for local E2E step
- - name: Build packages
- run: make -f builder.Makefile cross
-
- build-plugin:
- name: Build and tests in plugin mode
+ name: coverage-data-unit
+ path: bin/coverage/unit/
+ if-no-files-found: error
+ -
+ name: Unit Test Summary
+ uses: test-summary/action@v2
+ with:
+ paths: bin/coverage/unit/report.xml
+ if: always()
+ e2e:
runs-on: ubuntu-latest
- env:
- GO111MODULE: "on"
+ strategy:
+ fail-fast: false
+ matrix:
+ mode:
+ - plugin
+ - standalone
+ engine:
+ - 27 # old stable (latest major - 1)
+ - 28 # current stable
steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
- with:
- go-version: 1.17
- id: go
+ - name: Prepare
+ run: |
+ mode=${{ matrix.mode }}
+ engine=${{ matrix.engine }}
+ echo "MODE_ENGINE_PAIR=${mode}-${engine}" >> $GITHUB_ENV
- - name: Setup docker CLI
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install Docker ${{ matrix.engine }}
run: |
- curl https://download.docker.com/linux/static/stable/x86_64/docker-20.10.3.tgz | tar xz
- sudo cp ./docker/docker /usr/bin/ && rm -rf docker && docker version
+ sudo systemctl stop docker.service
+ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-ce-rootless-extras docker-buildx-plugin
+ sudo apt-get install curl
+ curl -fsSL https://test.docker.com -o get-docker.sh
+ sudo sh ./get-docker.sh --version ${{ matrix.engine }}
+
+ - name: Check Docker Version
+ run: docker --version
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
- - uses: actions/cache@v2
+ - name: Set up Docker Model
+ run: |
+ sudo apt-get install docker-model-plugin
+ docker model version
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
with:
- path: ~/go/pkg/mod
- key: go-${{ hashFiles('**/go.sum') }}
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: true
- - name: Test
- run: make -f builder.Makefile test
+ - name: Build example provider
+ run: make example-provider
- - name: Build for local E2E
+ - name: Build
+ uses: docker/bake-action@v6
+ with:
+ source: .
+ targets: binary-with-coverage
+ set: |
+ *.cache-from=type=gha,scope=binary-linux-amd64
+ *.cache-from=type=gha,scope=binary-e2e-${{ matrix.mode }}
+ *.cache-to=type=gha,scope=binary-e2e-${{ matrix.mode }},mode=max
env:
BUILD_TAGS: e2e
- run: make -f builder.Makefile compose-plugin
- - name: E2E Test in plugin mode
- run: make e2e-compose
-
- build-standalone:
- name: Build and tests in standalone mode
- runs-on: ubuntu-latest
- env:
- GO111MODULE: "on"
- steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
+ - name: Setup tmate session
+ if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }}
+ uses: mxschmitt/action-tmate@8b4e4ac71822ed7e0ad5fb3d1c33483e9e8fb270 # v3.11
with:
- go-version: 1.17
- id: go
+ limit-access-to-actor: true
+ github-token: ${{ secrets.GITHUB_TOKEN }}
- - name: Setup docker CLI
+ - name: Test plugin mode
+ if: ${{ matrix.mode == 'plugin' }}
run: |
- curl https://download.docker.com/linux/static/stable/x86_64/docker-20.10.3.tgz | tar xz
- sudo cp ./docker/docker /usr/bin/ && rm -rf docker && docker version
+ rm -rf ./bin/coverage/e2e
+ mkdir -p ./bin/coverage/e2e
+ make e2e-compose GOCOVERDIR=bin/coverage/e2e TEST_FLAGS="-v"
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
+ - name: Gather coverage data
+ if: ${{ matrix.mode == 'plugin' }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: coverage-data-e2e-${{ env.MODE_ENGINE_PAIR }}
+ path: bin/coverage/e2e/
+ if-no-files-found: error
- - uses: actions/cache@v2
+ - name: Test standalone mode
+ if: ${{ matrix.mode == 'standalone' }}
+ run: |
+ rm -f /usr/local/bin/docker-compose
+ cp bin/build/docker-compose /usr/local/bin
+ make e2e-compose-standalone
+
+ - name: e2e Test Summary
+ uses: test-summary/action@v2
+ with:
+ paths: /tmp/report/report.xml
+ if: always()
+ coverage:
+ runs-on: ubuntu-latest
+ needs:
+ - test
+ - e2e
+ steps:
+ # codecov won't process the report without the source code available
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ - name: Download unit test coverage
+ uses: actions/download-artifact@v4
+ with:
+ name: coverage-data-unit
+ path: coverage/unit
+ merge-multiple: true
+ - name: Download E2E test coverage
+ uses: actions/download-artifact@v4
with:
- path: ~/go/pkg/mod
- key: go-${{ hashFiles('**/go.sum') }}
+ pattern: coverage-data-e2e-*
+ path: coverage/e2e
+ merge-multiple: true
+ - name: Merge coverage reports
+ run: |
+ go tool covdata textfmt -i=./coverage/unit,./coverage/e2e -o ./coverage.txt
+ - name: Store coverage report in GitHub Actions
+ uses: actions/upload-artifact@v4
+ with:
+ name: go-covdata-txt
+ path: ./coverage.txt
+ if-no-files-found: error
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ files: ./coverage.txt
- - name: Build for local E2E
- env:
- BUILD_TAGS: e2e
- run: make -f builder.Makefile compose-plugin
+ release:
+ permissions:
+ contents: write # to create a release (ncipollo/release-action)
- - name: E2E Test in standalone mode
- run: make e2e-compose-standalone
+ runs-on: ubuntu-latest
+ needs:
+ - binary
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Download artifacts
+ uses: actions/download-artifact@v4
+ with:
+ pattern: compose-*
+ path: ./bin/release
+ merge-multiple: true
+ -
+ name: Create checksums
+ working-directory: ./bin/release
+ run: |
+ find . -type f -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# \*\./# *#' > $RUNNER_TEMP/checksums.txt
+ shasum -a 256 -U -c $RUNNER_TEMP/checksums.txt
+ mv $RUNNER_TEMP/checksums.txt .
+ cat checksums.txt | while read sum file; do
+ if [[ "${file#\*}" == docker-compose-* && "${file#\*}" != *.provenance.json && "${file#\*}" != *.sbom.json ]]; then
+ echo "$sum $file" > ${file#\*}.sha256
+ fi
+ done
+ -
+ name: List artifacts
+ run: |
+ tree -nh ./bin/release
+ -
+ name: Check artifacts
+ run: |
+ find bin/release -type f -exec file -e ascii -- {} +
+ -
+ name: GitHub Release
+ if: startsWith(github.ref, 'refs/tags/v')
+ uses: ncipollo/release-action@58ae73b360456532aafd58ee170c045abbeaee37 # v1.10.0
+ with:
+ artifacts: ./bin/release/*
+ generateReleaseNotes: true
+ draft: true
+ token: ${{ secrets.GITHUB_TOKEN }}
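For illustration, here is a minimal Go sketch of how a downloaded release binary could be verified against the per-file `.sha256` written by the release job above. The artifact name is a placeholder (not taken from this diff) and error handling is kept deliberately short.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// Placeholder artifact name; the release job writes "<sha256> *<filename>"
	// into a sibling <filename>.sha256 file.
	binPath := "docker-compose-linux-x86_64"

	sumLine, err := os.ReadFile(binPath + ".sha256")
	if err != nil {
		log.Fatal(err)
	}
	want := strings.Fields(string(sumLine))[0] // first field is the hex digest

	data, err := os.ReadFile(binPath)
	if err != nil {
		log.Fatal(err)
	}
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); got != want {
		log.Fatalf("checksum mismatch: got %s, want %s", got, want)
	}
	fmt.Println("checksum OK")
}
```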
diff --git a/.github/workflows/docs-upstream.yml b/.github/workflows/docs-upstream.yml
new file mode 100644
index 00000000000..214c88381fd
--- /dev/null
+++ b/.github/workflows/docs-upstream.yml
@@ -0,0 +1,51 @@
+# this workflow runs the remote validate bake target from docker/docs
+# to check if yaml reference docs used in this repo are valid
+name: docs-upstream
+
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+ contents: read
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+on:
+ push:
+ branches:
+ - 'main'
+ - 'v[0-9]*'
+ paths:
+ - '.github/workflows/docs-upstream.yml'
+ - 'docs/**'
+ pull_request:
+ paths:
+ - '.github/workflows/docs-upstream.yml'
+ - 'docs/**'
+
+jobs:
+ docs-yaml:
+ runs-on: ubuntu-latest
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Upload reference YAML docs
+ uses: actions/upload-artifact@v4
+ with:
+ name: docs-yaml
+ path: docs/reference
+ retention-days: 1
+
+ validate:
+ uses: docker/docs/.github/workflows/validate-upstream.yml@main
+ needs:
+ - docs-yaml
+ with:
+ module-name: docker/compose
diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
new file mode 100644
index 00000000000..cb1872a9d31
--- /dev/null
+++ b/.github/workflows/merge.yml
@@ -0,0 +1,163 @@
+name: merge
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+on:
+ push:
+ branches:
+ - 'main'
+ tags:
+ - 'v*'
+
+permissions:
+ contents: read # to fetch code (actions/checkout)
+
+env:
+ REPO_SLUG: "docker/compose-bin"
+
+jobs:
+ e2e:
+ name: Build and test
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 15
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [desktop-windows, desktop-macos, desktop-m1]
+ # mode: [plugin, standalone]
+ mode: [plugin]
+ env:
+ GO111MODULE: "on"
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache: true
+ check-latest: true
+
+ - name: List Docker resources on machine
+ run: |
+ docker ps --all
+ docker volume ls
+ docker network ls
+ docker image ls
+ - name: Remove Docker resources on machine
+ continue-on-error: true
+ run: |
+ docker kill $(docker ps -q)
+ docker rm -f $(docker ps -aq)
+ docker volume rm -f $(docker volume ls -q)
+ docker ps --all
+
+ - name: Unit tests
+ run: make test
+
+ - name: Build binaries
+ run: |
+ make
+ - name: Check arch of go compose binary
+ run: |
+ file ./bin/build/docker-compose
+ if: ${{ !contains(matrix.os, 'desktop-windows') }}
+ -
+ name: Test plugin mode
+ if: ${{ matrix.mode == 'plugin' }}
+ run: |
+ make e2e-compose
+ -
+ name: Test standalone mode
+ if: ${{ matrix.mode == 'standalone' }}
+ run: |
+ make e2e-compose-standalone
+
+ bin-image:
+ runs-on: ubuntu-22.04
+ outputs:
+ digest: ${{ fromJSON(steps.bake.outputs.metadata).image-cross['containerimage.digest'] }}
+ steps:
+ -
+ name: Free disk space
+ uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+ with:
+ android: true
+ dotnet: true
+ haskell: true
+ large-packages: true
+ swap-storage: true
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ -
+ name: Login to DockerHub
+ if: github.event_name != 'pull_request'
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERPUBLICBOT_USERNAME }}
+ password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ -
+ name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ ${{ env.REPO_SLUG }}
+ tags: |
+ type=ref,event=tag
+ type=edge
+ bake-target: meta-helper
+ -
+ name: Build and push image
+ uses: docker/bake-action@v6
+ id: bake
+ with:
+ source: .
+ files: |
+ ./docker-bake.hcl
+ ${{ steps.meta.outputs.bake-file }}
+ targets: image-cross
+ push: ${{ github.event_name != 'pull_request' }}
+ sbom: true
+ provenance: mode=max
+ set: |
+ *.cache-from=type=gha,scope=bin-image
+ *.cache-to=type=gha,scope=bin-image,mode=max
+
+ desktop-edge-test:
+ runs-on: ubuntu-latest
+ needs: bin-image
+ steps:
+ -
+ name: Generate Token
+ id: generate_token
+ uses: actions/create-github-app-token@v1
+ with:
+ app-id: ${{ vars.DOCKERDESKTOP_APP_ID }}
+ private-key: ${{ secrets.DOCKERDESKTOP_APP_PRIVATEKEY }}
+ owner: docker
+ repositories: |
+ ${{ secrets.DOCKERDESKTOP_REPO }}
+ -
+ name: Trigger Docker Desktop e2e with edge version
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ steps.generate_token.outputs.token }}
+ script: |
+ await github.rest.actions.createWorkflowDispatch({
+ owner: 'docker',
+ repo: '${{ secrets.DOCKERDESKTOP_REPO }}',
+ workflow_id: 'compose-edge-integration.yml',
+ ref: 'main',
+ inputs: {
+ "image-tag": "${{ needs.bin-image.outputs.digest }}"
+ }
+ })
diff --git a/.github/workflows/pr-closed.yml b/.github/workflows/pr-closed.yml
deleted file mode 100644
index ab13ae12692..00000000000
--- a/.github/workflows/pr-closed.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: PR cleanup
-on:
- pull_request:
- types: [closed]
-jobs:
- delete_pr_artifacts:
- runs-on: ubuntu-latest
- steps:
- - uses: stefanluptak/delete-old-pr-artifacts@v1
- with:
- workflow_filename: ci.yaml
\ No newline at end of file
diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml
deleted file mode 100644
index db5203798e1..00000000000
--- a/.github/workflows/rebase.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Automatic Rebase
-on:
- issue_comment:
- types: [created]
-jobs:
- rebase:
- name: Rebase
- if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase')
- runs-on: ubuntu-latest
- steps:
- - name: Checkout the latest code
- uses: actions/checkout@v2
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
- fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
- - name: Automatic Rebase
- uses: cirrus-actions/rebase@1.4
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
deleted file mode 100644
index 6f43c96c5cb..00000000000
--- a/.github/workflows/release.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-name: Releaser
-
-on:
- workflow_dispatch:
- inputs:
- tag:
- description: "Release Tag"
- required: true
-
-jobs:
- upload-release:
- runs-on: ubuntu-latest
- steps:
- - name: Set up Go 1.17
- uses: actions/setup-go@v2
- with:
- go-version: 1.17
- id: go
-
- - name: Setup docker CLI
- run: |
- curl https://download.docker.com/linux/static/stable/x86_64/docker-20.10.3.tgz | tar xz
- sudo cp ./docker/docker /usr/bin/ && rm -rf docker && docker version
-
- - name: Checkout code into the Go module directory
- uses: actions/checkout@v2
-
- - uses: actions/cache@v2
- with:
- path: ~/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
-
- - name: Build
- run: make GIT_TAG=${{ github.event.inputs.tag }} -f builder.Makefile cross
-
- - name: Compute checksums
- run: cd bin; for f in *; do shasum --algorithm 256 $f > $f.sha256; done
-
- - name: License
- run: cp packaging/* bin/
-
- - uses: ncipollo/release-action@v1
- with:
- artifacts: "bin/*"
- generateReleaseNotes: true
- draft: true
- commit: "v2"
- token: ${{ secrets.GITHUB_TOKEN }}
- tag: ${{ github.event.inputs.tag }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
new file mode 100644
index 00000000000..b8f0e5e2500
--- /dev/null
+++ b/.github/workflows/scorecards.yml
@@ -0,0 +1,63 @@
+name: Scorecards supply-chain security
+on:
+ # Only the default branch is supported.
+ branch_protection_rule:
+ schedule:
+ - cron: '44 9 * * 4'
+ push:
+ branches: [ "main" ]
+
+jobs:
+ analysis:
+ name: Scorecards analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Used to receive a badge.
+ id-token: write
+ # read permissions to all the other objects
+ actions: read
+ attestations: read
+ checks: read
+ contents: read
+ deployments: read
+ issues: read
+ discussions: read
+ packages: read
+ pages: read
+ pull-requests: read
+ statuses: read
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.4.2
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
+ with:
+ results_file: results.sarif
+ results_format: sarif
+
+ # Publish the results for public repositories to enable scorecard badges. For more details, see
+ # https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories, `publish_results` will automatically be set to `false`, regardless
+ # of the value entered here.
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # tag=v4.5.0
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard.
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@3096afedf9873361b2b2f65e1445b13272c83eb8 # tag=v2.20.00
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000000..2a747ee1c25
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,33 @@
+name: 'Close stale issues'
+
+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+ contents: read
+
+on:
+ schedule:
+ - cron: '0 0 * * 0,3' # at midnight UTC every Sunday and Wednesday
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+ days-before-issue-stale: 150 # marks stale after 5 months
+ days-before-issue-close: 30 # closes 1 month after being marked with no action
+ stale-issue-label: "stale"
+ exempt-issue-labels: "kind/feature,kind/enhancement"
+
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 48047887310..f01544f105f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
bin/
-dist/
/.vscode/
+coverage.out
+covdatafiles/
+.DS_Store
+pkg/e2e/*.tar
diff --git a/.golangci.yml b/.golangci.yml
index 48ca9cf9117..1d8ae0bb3e8 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,38 +1,89 @@
+version: "2"
+run:
+ concurrency: 2
linters:
- run:
- concurrency: 2
- skip-dirs:
- - tests/composefiles
- enable-all: false
- disable-all: true
+ default: none
enable:
- - deadcode
+ - copyloopvar
+ - depguard
- errcheck
+ - errorlint
+ - gocritic
- gocyclo
- - gofmt
- - goimports
- - revive
- - gosimple
+ - gomodguard
- govet
- ineffassign
- lll
- misspell
- nakedret
+ - nolintlint
+ - revive
- staticcheck
- - structcheck
- - typecheck
+ - testifylint
- unconvert
- unparam
- unused
- - varcheck
-linters-settings:
- gocyclo:
- min-complexity: 16
- lll:
- line-length: 200
+ settings:
+ depguard:
+ rules:
+ all:
+ deny:
+ - pkg: io/ioutil
+ desc: io/ioutil package has been deprecated
+ - pkg: github.com/docker/docker/errdefs
+ desc: use github.com/containerd/errdefs instead.
+ - pkg: golang.org/x/exp/maps
+ desc: use stdlib maps package
+ - pkg: golang.org/x/exp/slices
+ desc: use stdlib slices package
+ - pkg: gopkg.in/yaml.v2
+ desc: compose-go uses yaml.v3
+ gocritic:
+ disabled-checks:
+ - paramTypeCombine
+ - unnamedResult
+ - whyNoLint
+ enabled-tags:
+ - diagnostic
+ - opinionated
+ - style
+ gocyclo:
+ min-complexity: 16
+ gomodguard:
+ blocked:
+ modules:
+ - github.com/pkg/errors:
+ recommendations:
+ - errors
+ - fmt
+ versions:
+ - github.com/distribution/distribution:
+ reason: use distribution/reference
+ - gotest.tools:
+ version: < 3.0.0
+ reason: deprecated, pre-modules version
+ lll:
+ line-length: 200
+ revive:
+ rules:
+ - name: package-comments
+ disabled: true
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
issues:
- # golangci hides some golint warnings (the warning about exported things
- # withtout documentation for example), this will make it show them anyway.
- exclude-use-default: false
- exclude:
- - should not use dot imports
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/BUILDING.md b/BUILDING.md
index a73fb53eebd..e9861f08140 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -2,14 +2,17 @@
### Prerequisites
* Windows:
- * [Docker Desktop](https://hub.docker.com/editions/community/docker-ce-desktop-windows)
+ * [Docker Desktop](https://docs.docker.com/desktop/setup/install/windows-install/)
* make
+ * go (see [go.mod](go.mod) for minimum version)
* macOS:
- * [Docker Desktop](https://hub.docker.com/editions/community/docker-ce-desktop-mac)
+ * [Docker Desktop](https://docs.docker.com/desktop/setup/install/mac-install/)
* make
+ * go (see [go.mod](go.mod) for minimum version)
* Linux:
* [Docker 20.10 or later](https://docs.docker.com/engine/install/)
* make
+ * go (see [go.mod](go.mod) for minimum version)
### Building the CLI
@@ -19,7 +22,8 @@ Once you have the prerequisites installed, you can build the CLI using:
make
```
-This will output a `docker-compose` CLI plugin for your host machine in `./bin`.
+This will output a `docker-compose` CLI plugin for your host machine in
+`./bin/build`.
You can statically cross compile the CLI for Windows, macOS, and Linux using the
`cross` target.
@@ -34,21 +38,57 @@ make test
If you need to update a golden file simply do `go test ./... -test.update-golden`.
-### End to end tests
+### End-to-end tests
+To run e2e tests, the Compose CLI binary needs to be built. Each e2e test command also has a variant
+prefixed with `build-and-e2e` that builds the CLI before executing the tests.
-To run the end to end tests, run:
+Note that this requires a local Docker Engine to be running.
+
+#### Whole end-to-end tests suite
+
+To execute both CLI and standalone e2e tests, run:
+
+```console
+make e2e
+```
+
+Or if you need to build the CLI, run:
+```console
+make build-and-e2e
+```
+
+#### Plugin end-to-end tests suite
+
+To execute CLI plugin e2e tests, run:
```console
make e2e-compose
```
-Note that this requires a local Docker Engine to be running.
+Or if you need to build the CLI, run:
+```console
+make build-and-e2e-compose
+```
+
+#### Standalone end-to-end tests suite
+
+To execute the standalone CLI e2e tests, run:
+
+```console
+make e2e-compose-standalone
+```
+
+Or if you need to build the CLI, run:
+
+```console
+make build-and-e2e-compose-standalone
+```
## Releases
To create a new release:
-* Check that the CI is green on the main branch for commit you want to release
-* Run the release Github Actions workflow with a tag of the form vx.y.z following existing tags.
+* Check that the CI is green on the main branch for the commit you want to release
+* Run the release GitHub Actions workflow with a tag of the form vx.y.z, following existing tags.
This will automatically create a new tag, release and make binaries for
Windows, macOS, and Linux available for download on the
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 55be2a92b7f..9b80b8abc45 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,7 +2,7 @@
Want to hack on Docker? Awesome! We have a contributor's guide that explains
[setting up a Docker development environment and the contribution
-process](https://docs.docker.com/opensource/project/who-written-for/).
+process](https://docs.docker.com/contribute/).
This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors. Finally, make sure
@@ -11,23 +11,31 @@ start participating.
## Topics
-* [Reporting Security Issues](#reporting-security-issues)
-* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
-* [Reporting Issues](#reporting-other-issues)
-* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
-* [Community Guidelines](#docker-community-guidelines)
+- [Contributing to Docker](#contributing-to-docker)
+ - [Topics](#topics)
+ - [Reporting security issues](#reporting-security-issues)
+ - [Reporting other issues](#reporting-other-issues)
+ - [Quick contribution tips and guidelines](#quick-contribution-tips-and-guidelines)
+ - [Pull requests are always welcome](#pull-requests-are-always-welcome)
+ - [Talking to other Docker users and contributors](#talking-to-other-docker-users-and-contributors)
+ - [Conventions](#conventions)
+ - [Merge approval](#merge-approval)
+ - [Sign your work](#sign-your-work)
+ - [How can I become a maintainer?](#how-can-i-become-a-maintainer)
+ - [Docker community guidelines](#docker-community-guidelines)
+ - [Coding Style](#coding-style)
## Reporting security issues
The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
-Please **DO NOT** file a public issue, instead send your report privately to
+Please **DO NOT** file a public issue, instead, send your report privately to
[security@docker.com](mailto:security@docker.com).
-Security reports are greatly appreciated and we will publicly thank you for it.
+Security reports are greatly appreciated and we will publicly thank you for them.
We also like to send gifts—if you're into Docker swag, make sure to let
-us know. We currently do not offer a paid security bounty program, but are not
+us know. We currently do not offer a paid security bounty program but are not
ruling it out in the future.
@@ -39,11 +47,11 @@ and will thank you for it!
Check that [our issue database](https://github.com/docker/compose/labels/Docker%20Compose%20V2)
doesn't already include that problem or suggestion before submitting an issue.
-If you find a match, you can use the "subscribe" button to get notified on
+If you find a match, you can use the "subscribe" button to get notified of
updates. Do *not* leave random "+1" or "I have this too" comments, as they
only clutter the discussion, and don't help to resolve it. However, if you
have ways to reproduce the issue or have additional information that may help
-resolving the issue, please leave a comment.
+resolve the issue, please leave a comment.
When reporting issues, always include:
@@ -51,13 +59,18 @@ When reporting issues, always include:
* The output of `docker context show`.
* The output of `docker info`.
-Also include the steps required to reproduce the problem if possible and
+Also, include the steps required to reproduce the problem if possible and
applicable. This information will help us review and fix your issue faster.
When sending lengthy log files, consider posting them as a gist
(https://gist.github.com).
Don't forget to remove sensitive data from your log files before posting (you
can replace those parts with "REDACTED").
+_Note:_
+Maintainers might request additional information to diagnose an issue.
+If the initial reporter doesn't answer within a reasonable delay (a few weeks),
+the issue will be closed.
+
## Quick contribution tips and guidelines
This section gives the experienced contributor some tips and guidelines.
@@ -72,8 +85,7 @@ before anybody starts working on it.
We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
-don't get discouraged! Our contributor's guide explains
-[the review process we use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
+don't get discouraged!
### Talking to other Docker users and contributors
@@ -83,7 +95,7 @@ don't get discouraged! Our contributor's guide explains
| Community Slack |
- The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign-up with this link.
+ The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign-up with this link.
|
@@ -106,7 +118,7 @@ don't get discouraged! Our contributor's guide explains
| Stack Overflow |
Stack Overflow has over 17000 Docker questions listed. We regularly
- monitor Docker questions
+ monitor Docker questions
and so do many other knowledgeable Docker users.
|
@@ -124,9 +136,10 @@ Fork the repository and make changes on your fork in a feature branch:
issue.
Submit unit tests for your changes. Go has a great test framework built in; use
-it! Take a look at existing tests for inspiration. [Run the full test
-suite](README.md) on your branch before
-submitting a pull request.
+it! Take a look at existing tests for inspiration. Also, end-to-end tests are
+available. Run the full test suite, both unit tests and e2e tests on your
+branch before submitting a pull request. See [BUILDING.md](BUILDING.md) for
+instructions to build and run tests.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
@@ -144,7 +157,7 @@ suggested modifications and push additional commits to your feature branch. Post
a comment after pushing. New commits show up in the pull request automatically,
but the reviewers are notified only when you comment.
-Pull requests must be cleanly rebased on top of master without multiple branches
+Pull requests must be cleanly rebased on top of the base branch without multiple branches
mixed into the PR.
**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
@@ -164,7 +177,7 @@ changes in the same pull request so that a revert would remove all traces of
the feature or fix.
Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in the pull
-request description that close an issue. Including references automatically
+request description that closes an issue. Including references automatically
closes the issue on a merge.
Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
@@ -187,7 +200,7 @@ For more details, see the [MAINTAINERS](MAINTAINERS) page.
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
+the below (from [developercertificate.org](https://developercertificate.org/)):
```
Developer Certificate of Origin
@@ -239,7 +252,7 @@ commit automatically with `git commit -s`.
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the global
-[MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
+[MAINTAINERS](https://github.com/docker/opensource/blob/main/MAINTAINERS)
file in the
[https://github.com/docker/opensource/](https://github.com/docker/opensource/)
repository.
@@ -255,7 +268,7 @@ your help to keep it that way. To help with this we've come up with some general
guidelines for the community as a whole:
* Be nice: Be courteous, respectful and polite to fellow community members:
- no regional, racial, gender, or other abuse will be tolerated. We like
+ no regional, racial, gender or other abuse will be tolerated. We like
nice people way better than mean ones!
* Encourage diversity and participation: Make everyone in our community feel
@@ -269,10 +282,10 @@ guidelines for the community as a whole:
* Stay on topic: Make sure that you are posting to the correct channel and
avoid off-topic discussions. Remember when you update an issue or respond
- to an email you are potentially sending to a large number of people. Please
- consider this before you update. Also remember that nobody likes spam.
+ to an email you are potentially sending it to a large number of people. Please
+ consider this before you update. Also, remember that nobody likes spam.
-* Don't send email to the maintainers: There's no need to send email to the
+* Don't send emails to the maintainers: There's no need to send emails to the
maintainers to ask them to investigate an issue or to take a look at a
pull request. Instead of sending an email, GitHub mentions should be
used to ping maintainers to review a pull request, a proposal or an
@@ -286,7 +299,7 @@ to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
-goes against the spirit of the guidelines. All new contributions should make a
+goes against the spirit of the guidelines. All new contributors should make their
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
@@ -298,9 +311,9 @@ The rules:
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
- Go](http://golang.org/doc/effective_go.html) and [Go Code Review
- Comments](https://github.com/golang/go/wiki/CodeReviewComments).
-4. Comment the code. Tell us the why, the history and the context.
+ Go](https://go.dev/doc/effective_go) and [Go Code Review
+ Comments](https://go.dev/wiki/CodeReviewComments).
+4. Include code comments. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
@@ -321,6 +334,6 @@ The rules:
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
-reading through [Effective Go](https://golang.org/doc/effective_go.html). The
-[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
+reading through [Effective Go](https://go.dev/doc/effective_go). The
+[Go Blog](https://go.dev/blog/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
diff --git a/Dockerfile b/Dockerfile
index 548caed5a61..9b47ed8594d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-# syntax=docker/dockerfile:1.2
+# syntax=docker/dockerfile:1
# Copyright 2020 Docker Compose CLI authors
@@ -15,93 +15,183 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG GO_VERSION=1.17-alpine
-ARG GOLANGCI_LINT_VERSION=v1.40.1-alpine
-ARG PROTOC_GEN_GO_VERSION=v1.4.3
-
-FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS base
-WORKDIR /compose-cli
-RUN apk add --no-cache -vv \
- git \
- docker \
- make \
- protoc \
- protobuf-dev
+ARG GO_VERSION=1.24.9
+ARG XX_VERSION=1.6.1
+ARG GOLANGCI_LINT_VERSION=v2.0.2
+ARG ADDLICENSE_VERSION=v1.0.0
+
+ARG BUILD_TAGS="e2e"
+ARG DOCS_FORMATS="md,yaml"
+ARG LICENSE_FILES=".*\(Dockerfile\|Makefile\|\.go\|\.hcl\|\.sh\)"
+
+# xx is a helper for cross-compilation
+FROM --platform=${BUILDPLATFORM} tonistiigi/xx:${XX_VERSION} AS xx
+
+# osxcross contains the MacOSX cross toolchain for xx
+FROM crazymax/osxcross:11.3-alpine AS osxcross
+
+FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
+FROM ghcr.io/google/addlicense:${ADDLICENSE_VERSION} AS addlicense
+
+FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION}-alpine AS base
+COPY --from=xx / /
+RUN apk add --no-cache \
+ clang \
+ docker \
+ file \
+ findutils \
+ git \
+ make \
+ protoc \
+ protobuf-dev
+WORKDIR /src
+ENV CGO_ENABLED=0
+
+FROM base AS build-base
COPY go.* .
RUN --mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
go mod download
-FROM base AS lint
-ENV CGO_ENABLED=0
-COPY --from=golangci/golangci-lint /usr/bin/golangci-lint /usr/bin/golangci-lint
-ARG BUILD_TAGS
-ARG GIT_TAG
-RUN --mount=target=. \
+FROM build-base AS vendored
+RUN --mount=type=bind,target=.,rw \
--mount=type=cache,target=/go/pkg/mod \
- --mount=type=cache,target=/root/.cache/go-build \
- --mount=type=cache,target=/root/.cache/golangci-lint \
- BUILD_TAGS=${BUILD_TAGS} \
- GIT_TAG=${GIT_TAG} \
- make -f builder.Makefile lint
-
-FROM base AS make-compose-plugin
-ENV CGO_ENABLED=0
-ARG TARGETOS
-ARG TARGETARCH
+ go mod tidy && mkdir /out && cp go.mod go.sum /out
+
+FROM scratch AS vendor-update
+COPY --from=vendored /out /
+
+FROM vendored AS vendor-validate
+RUN --mount=type=bind,target=.,rw <<EOT
+  set -e
+  cp -rf /out/* .
+  diff=$(git status --porcelain -- go.mod go.sum)
+  if [ -n "$diff" ]; then
+    echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make go-mod-tidy"'
+    echo "$diff"
+    exit 1
+  fi
+EOT
+
+FROM build-base AS build
ARG BUILD_TAGS
-ARG GIT_TAG
-RUN --mount=target=. \
+ARG BUILD_FLAGS
+ARG TARGETPLATFORM
+RUN --mount=type=bind,target=. \
+ --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
- --mount=type=cache,target=/root/.cache/go-build \
- GOOS=${TARGETOS} \
- GOARCH=${TARGETARCH} \
- BUILD_TAGS=${BUILD_TAGS} \
- GIT_TAG=${GIT_TAG} \
- make COMPOSE_BINARY=/out/docker-compose -f builder.Makefile compose-plugin
+ --mount=type=bind,from=osxcross,src=/osxsdk,target=/xx-sdk \
+ xx-go --wrap && \
+ if [ "$(xx-info os)" == "darwin" ]; then export CGO_ENABLED=1; fi && \
+ make build GO_BUILDTAGS="$BUILD_TAGS" DESTDIR=/out && \
+ xx-verify --static /out/docker-compose
-FROM base AS make-cross
+FROM build-base AS lint
ARG BUILD_TAGS
-ARG GIT_TAG
-RUN --mount=target=. \
+ENV GOLANGCI_LINT_CACHE=/cache/golangci-lint
+RUN --mount=type=bind,target=. \
+ --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
- --mount=type=cache,target=/root/.cache/go-build \
- BUILD_TAGS=${BUILD_TAGS} \
- GIT_TAG=${GIT_TAG} \
- make COMPOSE_BINARY=/out/docker-compose -f builder.Makefile cross
+ --mount=type=cache,target=/cache/golangci-lint \
+ --mount=from=golangci-lint,source=/usr/bin/golangci-lint,target=/usr/bin/golangci-lint \
+ golangci-lint cache status && \
+ golangci-lint run --build-tags "$BUILD_TAGS" ./...
-FROM scratch AS compose-plugin
-COPY --from=make-compose-plugin /out/* .
-
-FROM scratch AS cross
-COPY --from=make-cross /out/* .
-
-FROM base AS test
-ENV CGO_ENABLED=0
+FROM build-base AS test
+ARG CGO_ENABLED=0
ARG BUILD_TAGS
-ARG GIT_TAG
-RUN --mount=target=. \
+RUN --mount=type=bind,target=. \
+ --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
- --mount=type=cache,target=/root/.cache/go-build \
- BUILD_TAGS=${BUILD_TAGS} \
- GIT_TAG=${GIT_TAG} \
- make -f builder.Makefile test
-
-FROM base AS check-license-headers
-RUN go get -u github.com/kunalkushwaha/ltag
+ rm -rf /tmp/coverage && \
+ mkdir -p /tmp/coverage && \
+ rm -rf /tmp/report && \
+ mkdir -p /tmp/report && \
+ go run gotest.tools/gotestsum@latest --format testname --junitfile "/tmp/report/report.xml" -- -tags "$BUILD_TAGS" -v -cover -covermode=atomic $(go list $(TAGS) ./... | grep -vE 'e2e') -args -test.gocoverdir="/tmp/coverage" && \
+ go tool covdata percent -i=/tmp/coverage
+
+FROM scratch AS test-coverage
+COPY --from=test --link /tmp/coverage /
+COPY --from=test --link /tmp/report /
+
+FROM base AS license-set
+ARG LICENSE_FILES
+RUN --mount=type=bind,target=.,rw \
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
+ find . -regex "${LICENSE_FILES}" | xargs addlicense -c 'Docker Compose CLI' -l apache && \
+ mkdir /out && \
+ find . -regex "${LICENSE_FILES}" | cpio -pdm /out
+
+FROM scratch AS license-update
+COPY --from=license-set /out /
+
+FROM base AS license-validate
+ARG LICENSE_FILES
+RUN --mount=type=bind,target=. \
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
+ find . -regex "${LICENSE_FILES}" | xargs addlicense -check -c 'Docker Compose CLI' -l apache -ignore validate -ignore testdata -ignore resolvepath -v
+
+FROM base AS docsgen
+WORKDIR /src
RUN --mount=target=. \
- make -f builder.Makefile check-license-headers
-
-FROM base AS make-go-mod-tidy
-COPY . .
-RUN --mount=type=cache,target=/go/pkg/mod \
- --mount=type=cache,target=/root/.cache/go-build \
- go mod tidy
-
-FROM scratch AS go-mod-tidy
-COPY --from=make-go-mod-tidy /compose-cli/go.mod .
-COPY --from=make-go-mod-tidy /compose-cli/go.sum .
-
-FROM base AS check-go-mod
-COPY . .
-RUN make -f builder.Makefile check-go-mod
+ --mount=target=/root/.cache,type=cache \
+ --mount=type=cache,target=/go/pkg/mod \
+ go build -o /out/docsgen ./docs/yaml/main/generate.go
+
+FROM --platform=${BUILDPLATFORM} alpine AS docs-build
+RUN apk add --no-cache rsync git
+WORKDIR /src
+COPY --from=docsgen /out/docsgen /usr/bin
+ARG DOCS_FORMATS
+RUN --mount=target=/context \
+    --mount=target=.,type=tmpfs <<EOT
+  set -e
+  rsync -a /context/. .
+  docsgen --formats "$DOCS_FORMATS" --source "docs/reference"
+  mkdir /out
+  cp -r docs/reference /out
+EOT
+
+FROM scratch AS docs-update
+COPY --from=docs-build /out /out
+
+FROM docs-build AS docs-validate
+RUN --mount=target=/context \
+    --mount=target=.,type=tmpfs <<EOT
+  set -e
+  rsync -a /context/. .
+  git add -A
+  rm -rf docs/reference/*
+  cp -rf /out/* ./docs/
+  if [ -n "$(git status --porcelain -- docs/reference)" ]; then
+    echo >&2 'ERROR: Docs result differs. Please update with "make docs"'
+    git status --porcelain -- docs/reference
+    exit 1
+  fi
+EOT
+
+FROM scratch AS binary-unix
+COPY --link --from=build /out/docker-compose /
+FROM binary-unix AS binary-darwin
+FROM binary-unix AS binary-linux
+FROM scratch AS binary-windows
+COPY --link --from=build /out/docker-compose /docker-compose.exe
+FROM binary-$TARGETOS AS binary
+# enable scanning for this stage
+ARG BUILDKIT_SBOM_SCAN_STAGE=true
+
+FROM --platform=$BUILDPLATFORM alpine AS releaser
+WORKDIR /work
+ARG TARGETOS
+ARG TARGETARCH
+ARG TARGETVARIANT
+RUN --mount=from=binary \
+ mkdir -p /out && \
+ # TODO: should just use standard arch
+ TARGETARCH=$([ "$TARGETARCH" = "amd64" ] && echo "x86_64" || echo "$TARGETARCH"); \
+ TARGETARCH=$([ "$TARGETARCH" = "arm64" ] && echo "aarch64" || echo "$TARGETARCH"); \
+ cp docker-compose* "/out/docker-compose-${TARGETOS}-${TARGETARCH}${TARGETVARIANT}$(ls docker-compose* | sed -e 's/^docker-compose//')"
+
+FROM scratch AS release
+COPY --from=releaser /out/ /
diff --git a/MAINTAINERS b/MAINTAINERS
deleted file mode 100644
index 819532f2b40..00000000000
--- a/MAINTAINERS
+++ /dev/null
@@ -1,63 +0,0 @@
-# Docker maintainers file
-#
-# This file describes who runs the docker/compose-cli project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant
-# parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
-
- [Org."Core maintainers"]
-
- # The Core maintainers are the ghostbusters of the project: when there's a problem others
- # can't solve, they show up and fix it with bizarre devices and weaponry.
- # They have final say on technical implementation and coding style.
- # They are ultimately responsible for quality in all its forms: usability polish,
- # bugfixes, performance, stability, etc. When ownership can cleanly be passed to
- # a subsystem, they are responsible for doing so and holding the
- # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
-
- people = [
- "rumpl",
- "gtardif",
- "ndeloof",
- "chris-crone",
- "ulyssessouza"
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.chris-crone]
- Name = "Christopher Crone"
- Email = "christopher.crone@docker.com"
- GitHub = "chris-crone"
-
- [people.gtardif]
- Name = "Guillaume Tardif"
- Email = "guillaume.tardif@docker.com"
- GitHub = "gtardif"
-
- [people.ndeloof]
- Name = "Nicolas Deloof"
- Email = "nicolas.deloof@docker.com"
- GitHub = "ndeloof"
-
- [people.rumpl]
- Name = "Djordje Lukic"
- Email = "djordje.lukic@docker.com"
- GitHub = "rumpl"
-
- [people.ulyssessouza]
- Name = "Ulysses Souza"
- Email = "/dev/null 2>&1 || go install go.uber.org/mock/mockgen@v0.4.0
mockgen -destination pkg/mocks/mock_docker_cli.go -package mocks github.com/docker/cli/cli/command Cli
mockgen -destination pkg/mocks/mock_docker_api.go -package mocks github.com/docker/docker/client APIClient
+ mockgen -destination pkg/mocks/mock_docker_compose_api.go -package mocks -source=./pkg/api/api.go Service
.PHONY: e2e
e2e: e2e-compose e2e-compose-standalone ## Run end to end local tests in both modes. Set E2E_TEST=TestName to run a single test
+.PHONY: build-and-e2e
+build-and-e2e: build e2e-compose e2e-compose-standalone ## Compile the compose cli-plugin and run end to end local tests in both modes. Set E2E_TEST=TestName to run a single test
+
.PHONY: cross
cross: ## Compile the CLI for linux, darwin and windows
- @docker build . --target cross \
- --build-arg BUILD_TAGS \
- --build-arg GIT_TAG=$(GIT_TAG) \
- --output ./bin \
+ $(BUILDX_CMD) bake binary-cross
.PHONY: test
test: ## Run unit tests
- @docker build --progress=plain . \
- --build-arg BUILD_TAGS=kube \
- --build-arg GIT_TAG=$(GIT_TAG) \
- --target test
+ $(BUILDX_CMD) bake test
.PHONY: cache-clear
cache-clear: ## Clear the builder cache
- @docker builder prune --force --filter type=exec.cachemount --filter=unused-for=24h
+ $(BUILDX_CMD) prune --force --filter type=exec.cachemount --filter=unused-for=24h
.PHONY: lint
lint: ## run linter(s)
- @docker build . \
- --build-arg BUILD_TAGS=kube,e2e \
- --build-arg GIT_TAG=$(GIT_TAG) \
- --target lint
+ $(BUILDX_CMD) bake lint
+
+.PHONY: fmt
+fmt:
+ gofumpt --version >/dev/null 2>&1 || go install mvdan.cc/gofumpt@latest
+ gofumpt -w .
.PHONY: docs
docs: ## generate documentation
- $(eval $@_TMP_OUT := $(shell mktemp -d -t dockercli-output.XXXXXXXXXX))
- docker build . \
- --output type=local,dest=$($@_TMP_OUT) \
- -f ./docs/docs.Dockerfile \
- --target update
+ $(eval $@_TMP_OUT := $(shell mktemp -d -t compose-output.XXXXXXXXXX))
+ $(BUILDX_CMD) bake --set "*.output=type=local,dest=$($@_TMP_OUT)" docs-update
rm -rf ./docs/internal
- cp -R "$($@_TMP_OUT)"/out/* ./docs/
- rm -rf "$($@_TMP_OUT)"/*
+ cp -R "$(DRIVE_PREFIX)$($@_TMP_OUT)"/out/* ./docs/
+ rm -rf "$(DRIVE_PREFIX)$($@_TMP_OUT)"/*
.PHONY: validate-docs
validate-docs: ## validate the doc does not change
- @docker build . \
- -f ./docs/docs.Dockerfile \
- --target validate
+ $(BUILDX_CMD) bake docs-validate
.PHONY: check-dependencies
check-dependencies: ## check dependency updates
@@ -105,19 +143,19 @@ check-dependencies: ## check dependency updates
.PHONY: validate-headers
validate-headers: ## Check license header for all files
- @docker build . --target check-license-headers
+ $(BUILDX_CMD) bake license-validate
.PHONY: go-mod-tidy
go-mod-tidy: ## Run go mod tidy in a container and output resulting go.mod and go.sum
- @docker build . --target go-mod-tidy --output .
+ $(BUILDX_CMD) bake vendor-update
.PHONY: validate-go-mod
validate-go-mod: ## Validate go.mod and go.sum are up-to-date
- @docker build . --target check-go-mod
+ $(BUILDX_CMD) bake vendor-validate
-validate: validate-go-mod validate-headers validate-docs ## Validate sources
+validate: validate-go-mod validate-headers validate-docs ## Validate sources
-pre-commit: validate check-dependencies lint compose-plugin test e2e-compose
+pre-commit: validate check-dependencies lint build test e2e-compose
help: ## Show help
@echo Please specify a build target. The choices are:
diff --git a/README.md b/README.md
index 8e6871b244e..45f900d3d25 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,40 @@
+# Table of Contents
+- [Docker Compose v2](#docker-compose-v2)
+- [Where to get Docker Compose](#where-to-get-docker-compose)
+ + [Windows and macOS](#windows-and-macos)
+ + [Linux](#linux)
+- [Quick Start](#quick-start)
+- [Contributing](#contributing)
+- [Legacy](#legacy)
# Docker Compose v2
-[](https://github.com/docker/compose/actions)
-
+[](https://github.com/docker/compose/releases/latest)
+[](https://pkg.go.dev/github.com/docker/compose/v5)
+[](https://github.com/docker/compose/actions?query=workflow%3Aci)
+[](https://goreportcard.com/report/github.com/docker/compose/v5)
+[](https://codecov.io/gh/docker/compose)
+[](https://api.securityscorecards.dev/projects/github.com/docker/compose)

Docker Compose is a tool for running multi-container applications on Docker
defined using the [Compose file format](https://compose-spec.io).
-A Compose file is used to define how the one or more containers that make up
+A Compose file is used to define how one or more containers that make up
your application are configured.
Once you have a Compose file, you can create and start your application with a
single command: `docker compose up`.
-# About update and backward compatibility
-
-Docker Compose V2 is a major version bump release of Docker Compose. It has been completely rewritten from scratch in Golang (V1 was in Python). The installation instructions for Compose V2 differ from V1. V2 is not a standalone binary anymore, and installation scripts will have to be adjusted. Some commands are different.
-
-For a smooth transition from legacy docker-compose 1.xx, please consider installing [compose-switch](https://github.com/docker/compose-switch) to translate `docker-compose ...` commands into Compose V2's `docker compose .... `. Also check V2's `--compatibility` flag.
+> **Note**: About Docker Swarm
+> Docker Swarm used to rely on the legacy Compose file format but did not adopt the Compose Specification,
+> so it is missing some of the recent enhancements in Compose syntax. Since the
+> [acquisition by Mirantis](https://www.mirantis.com/software/swarm/), Swarm is no longer maintained by Docker Inc.,
+> and as such some Docker Compose features aren't available to Swarm users.
# Where to get Docker Compose
### Windows and macOS
Docker Compose is included in
-[Docker Desktop](https://www.docker.com/products/docker-desktop)
+[Docker Desktop](https://www.docker.com/products/docker-desktop/)
for Windows and macOS.
### Linux
@@ -30,23 +42,23 @@ for Windows and macOS.
You can download Docker Compose binaries from the
[release page](https://github.com/docker/compose/releases) on this repository.
-Rename the relevant binary for your OS to `docker-compose` and copy it to `$HOME/.docker/cli-plugins`
+Rename the relevant binary for your OS to `docker-compose` and copy it to `$HOME/.docker/cli-plugins`
-Or copy it into one of these folders for installing it system-wide:
+Or copy it into one of these folders to install it system-wide:
* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`
-(might require to make the downloaded file executable with `chmod +x`)
+(might require making the downloaded file executable with `chmod +x`)
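+
+For example, a per-user install on Linux x86_64 might look like this (the asset name follows the
+`docker-compose-<os>-<arch>` naming used on the release page; adjust it for your platform):
+
+```console
+$ mkdir -p ~/.docker/cli-plugins
+$ curl -SL https://github.com/docker/compose/releases/latest/download/docker-compose-linux-x86_64 \
+    -o ~/.docker/cli-plugins/docker-compose
+$ chmod +x ~/.docker/cli-plugins/docker-compose
+$ docker compose version
+```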
Quick Start
-----------
-Using Docker Compose is basically a three-step process:
+Using Docker Compose is a three-step process:
1. Define your app's environment with a `Dockerfile` so it can be
reproduced anywhere.
-2. Define the services that make up your app in `docker-compose.yml` so
+2. Define the services that make up your app in `compose.yaml` so
they can be run together in an isolated environment.
3. Lastly, run `docker compose up` and Compose will start and run your entire
app.
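+
+For illustration, a minimal `compose.yaml` for the steps above might look like this
+(service names and images are placeholders):
+
+```yaml
+services:
+  web:
+    build: .
+    ports:
+      - "8000:8000"
+  redis:
+    image: redis:alpine
+```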
@@ -73,3 +85,8 @@ Want to help develop Docker Compose? Check out our
If you find an issue, please report it on the
[issue tracker](https://github.com/docker/compose/issues/new/choose).
+
+Legacy
+-------------
+
+The Python version of Compose is available under the `v1` [branch](https://github.com/docker/compose/tree/v1).
diff --git a/builder.Makefile b/builder.Makefile
deleted file mode 100644
index 3509e61f18a..00000000000
--- a/builder.Makefile
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2020 Docker Compose CLI authors
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-GOOS?=$(shell go env GOOS)
-GOARCH?=$(shell go env GOARCH)
-
-PKG_NAME := github.com/docker/compose/v2
-
-EXTENSION:=
-ifeq ($(GOOS),windows)
- EXTENSION:=.exe
-endif
-
-STATIC_FLAGS=CGO_ENABLED=0
-
-GIT_TAG?=$(shell git describe --tags --match "v[0-9]*")
-
-LDFLAGS="-s -w -X $(PKG_NAME)/internal.Version=${GIT_TAG}"
-GO_BUILD=$(STATIC_FLAGS) go build -trimpath -ldflags=$(LDFLAGS)
-
-COMPOSE_BINARY?=bin/docker-compose
-COMPOSE_BINARY_WITH_EXTENSION=$(COMPOSE_BINARY)$(EXTENSION)
-
-WORK_DIR:=$(shell mktemp -d)
-
-TAGS:=
-ifdef BUILD_TAGS
- TAGS=-tags $(BUILD_TAGS)
- LINT_TAGS=--build-tags $(BUILD_TAGS)
-endif
-
-.PHONY: compose-plugin
-compose-plugin:
- GOOS=${GOOS} GOARCH=${GOARCH} $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY_WITH_EXTENSION) ./cmd
-
-.PHONY: cross
-cross:
- GOOS=linux GOARCH=amd64 $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-linux-x86_64 ./cmd
- GOOS=linux GOARCH=arm64 $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-linux-aarch64 ./cmd
- GOOS=linux GOARM=6 GOARCH=arm $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-linux-armv6 ./cmd
- GOOS=linux GOARM=7 GOARCH=arm $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-linux-armv7 ./cmd
- GOOS=linux GOARCH=s390x $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-linux-s390x ./cmd
- GOOS=darwin GOARCH=amd64 $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-darwin-x86_64 ./cmd
- GOOS=darwin GOARCH=arm64 $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-darwin-aarch64 ./cmd
- GOOS=windows GOARCH=amd64 $(GO_BUILD) $(TAGS) -o $(COMPOSE_BINARY)-windows-x86_64.exe ./cmd
-
-.PHONY: test
-test:
- go test $(TAGS) -cover $(shell go list $(TAGS) ./... | grep -vE 'e2e')
-
-.PHONY: lint
-lint:
- golangci-lint run $(LINT_TAGS) --timeout 10m0s ./...
-
-.PHONY: check-license-headers
-check-license-headers:
- ./scripts/validate/fileheader
-
-.PHONY: check-go-mod
-check-go-mod:
- ./scripts/validate/check-go-mod
diff --git a/cmd/cmdtrace/cmd_span.go b/cmd/cmdtrace/cmd_span.go
new file mode 100644
index 00000000000..365f95f7ca6
--- /dev/null
+++ b/cmd/cmdtrace/cmd_span.go
@@ -0,0 +1,147 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cmdtrace
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ dockercli "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ commands "github.com/docker/compose/v5/cmd/compose"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/spf13/cobra"
+ flag "github.com/spf13/pflag"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Setup should be called as part of the command's PersistentPreRunE
+// as soon as possible after initializing the dockerCli.
+//
+// It initializes the tracer for the CLI using both auto-detection from the
+// Docker context metadata and the standard OTEL_ env vars, creates a root
+// span for the command, and wraps the actual
+// command invocation to ensure the span is properly finalized and
+// exported before exit.
+func Setup(cmd *cobra.Command, dockerCli command.Cli, args []string) error {
+ tracingShutdown, err := tracing.InitTracing(dockerCli)
+ if err != nil {
+ return fmt.Errorf("initializing tracing: %w", err)
+ }
+
+ ctx := cmd.Context()
+ ctx, cmdSpan := otel.Tracer("").Start(
+ ctx,
+ "cli/"+strings.Join(commandName(cmd), "-"),
+ )
+ cmdSpan.SetAttributes(
+ attribute.StringSlice("cli.flags", getFlags(cmd.Flags())),
+ attribute.Bool("cli.isatty", dockerCli.In().IsTerminal()),
+ )
+
+ cmd.SetContext(ctx)
+ wrapRunE(cmd, cmdSpan, tracingShutdown)
+ return nil
+}
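+
+// Illustrative wiring (a sketch, not mandated by this package): the plugin's
+// root command can call Setup from its PersistentPreRunE once dockerCli is
+// initialized, e.g.
+//
+//	cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
+//	    return cmdtrace.Setup(cmd, dockerCli, args)
+//	}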
+
+// wrapRunE injects a wrapper function around the command's actual RunE (or Run)
+// method. This is necessary to capture the command result for reporting as well
+// as flushing any spans before exit.
+//
+// Unfortunately, PersistentPostRun(E) can't be used for this purpose because it
+// only runs if RunE does _not_ return an error, but this should run unconditionally.
+func wrapRunE(c *cobra.Command, cmdSpan trace.Span, tracingShutdown tracing.ShutdownFunc) {
+ origRunE := c.RunE
+ if origRunE == nil {
+ origRun := c.Run
+ //nolint:unparam // wrapper function for RunE, always returns nil by design
+ origRunE = func(cmd *cobra.Command, args []string) error {
+ origRun(cmd, args)
+ return nil
+ }
+ c.Run = nil
+ }
+
+ c.RunE = func(cmd *cobra.Command, args []string) error {
+ cmdErr := origRunE(cmd, args)
+ if cmdSpan != nil {
+ if cmdErr != nil && !errors.Is(cmdErr, context.Canceled) {
+ // default exit code is 1 if a more descriptive error
+ // wasn't returned
+ exitCode := 1
+ var statusErr dockercli.StatusError
+ if errors.As(cmdErr, &statusErr) {
+ exitCode = statusErr.StatusCode
+ }
+ cmdSpan.SetStatus(codes.Error, "CLI command returned error")
+ cmdSpan.RecordError(cmdErr, trace.WithAttributes(
+ attribute.Int("exit_code", exitCode),
+ ))
+
+ } else {
+ cmdSpan.SetStatus(codes.Ok, "")
+ }
+ cmdSpan.End()
+ }
+ if tracingShutdown != nil {
+ // use background for root context because the cmd's context might have
+ // been canceled already
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+ // TODO(milas): add an env var to enable logging from the
+ // OTel components for debugging purposes
+ _ = tracingShutdown(ctx)
+ }
+ return cmdErr
+ }
+}
+
+// commandName returns the path components for a given command,
+// in reverse alphabetical order for consistent usage metrics.
+//
+// The root Compose command and anything before (i.e. "docker")
+// are not included.
+//
+// For example:
+// - docker compose alpha watch -> [watch, alpha]
+// - docker-compose up -> [up]
+func commandName(cmd *cobra.Command) []string {
+ var name []string
+ for c := cmd; c != nil; c = c.Parent() {
+ if c.Name() == commands.PluginName {
+ break
+ }
+ name = append(name, c.Name())
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(name)))
+ return name
+}
+
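+// getFlags returns the names of the flags that were explicitly set for this
+// invocation; they are reported as the "cli.flags" span attribute.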
+func getFlags(fs *flag.FlagSet) []string {
+ var result []string
+ fs.Visit(func(flag *flag.Flag) {
+ result = append(result, flag.Name)
+ })
+ return result
+}
diff --git a/cmd/cmdtrace/cmd_span_test.go b/cmd/cmdtrace/cmd_span_test.go
new file mode 100644
index 00000000000..0a3c8efecfc
--- /dev/null
+++ b/cmd/cmdtrace/cmd_span_test.go
@@ -0,0 +1,112 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package cmdtrace
+
+import (
+ "reflect"
+ "testing"
+
+ commands "github.com/docker/compose/v5/cmd/compose"
+ "github.com/spf13/cobra"
+ flag "github.com/spf13/pflag"
+)
+
+func TestGetFlags(t *testing.T) {
+ // Initialize flagSet with flags
+ fs := flag.NewFlagSet("up", flag.ContinueOnError)
+ var (
+ detach string
+ timeout string
+ )
+ fs.StringVar(&detach, "detach", "d", "")
+ fs.StringVar(&timeout, "timeout", "t", "")
+ _ = fs.Set("detach", "detach")
+ _ = fs.Set("timeout", "timeout")
+
+ tests := []struct {
+ name string
+ input *flag.FlagSet
+ expected []string
+ }{
+ {
+ name: "NoFlags",
+ input: flag.NewFlagSet("NoFlags", flag.ContinueOnError),
+ expected: nil,
+ },
+ {
+ name: "Flags",
+ input: fs,
+ expected: []string{"detach", "timeout"},
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ result := getFlags(test.input)
+ if !reflect.DeepEqual(result, test.expected) {
+ t.Errorf("Expected %v, but got %v", test.expected, result)
+ }
+ })
+ }
+}
+
+func TestCommandName(t *testing.T) {
+ tests := []struct {
+ name string
+ setupCmd func() *cobra.Command
+ want []string
+ }{
+ {
+ name: "docker compose alpha watch -> [watch, alpha]",
+ setupCmd: func() *cobra.Command {
+ dockerCmd := &cobra.Command{Use: "docker"}
+ composeCmd := &cobra.Command{Use: commands.PluginName}
+ alphaCmd := &cobra.Command{Use: "alpha"}
+ watchCmd := &cobra.Command{Use: "watch"}
+
+ dockerCmd.AddCommand(composeCmd)
+ composeCmd.AddCommand(alphaCmd)
+ alphaCmd.AddCommand(watchCmd)
+
+ return watchCmd
+ },
+ want: []string{"watch", "alpha"},
+ },
+ {
+ name: "docker-compose up -> [up]",
+ setupCmd: func() *cobra.Command {
+ dockerComposeCmd := &cobra.Command{Use: commands.PluginName}
+ upCmd := &cobra.Command{Use: "up"}
+
+ dockerComposeCmd.AddCommand(upCmd)
+
+ return upCmd
+ },
+ want: []string{"up"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := tt.setupCmd()
+ got := commandName(cmd)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("commandName() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/cmd/compatibility/convert.go b/cmd/compatibility/convert.go
index 27a57063b73..78d9b8303c5 100644
--- a/cmd/compatibility/convert.go
+++ b/cmd/compatibility/convert.go
@@ -19,10 +19,18 @@ package compatibility
import (
"fmt"
"os"
+ "strings"
- "github.com/docker/compose/v2/cmd/compose"
+ "github.com/docker/compose/v5/cmd/compose"
)
+func getCompletionCommands() []string {
+ return []string{
+ "__complete",
+ "__completeNoDesc",
+ }
+}
+
func getBoolFlags() []string {
return []string{
"--debug", "-D",
@@ -48,39 +56,50 @@ func Convert(args []string) []string {
var rootFlags []string
command := []string{compose.PluginName}
l := len(args)
+ARGS:
for i := 0; i < l; i++ {
arg := args[i]
- if arg[0] != '-' {
- // not a top-level flag anymore, keep the rest of the command unmodified
- if arg == compose.PluginName {
- i++
- }
+ if contains(getCompletionCommands(), arg) {
+ command = append([]string{arg}, command...)
+ continue
+ }
+ if arg != "" && arg[0] != '-' {
command = append(command, args[i:]...)
break
}
- if arg == "--verbose" {
+
+ switch arg {
+ case "--verbose":
arg = "--debug"
- }
- if arg == "-h" {
+ case "-h":
 		// docker cli has deprecated -h to avoid ambiguity with -H, while docker-compose still supports it
arg = "--help"
- }
- if arg == "--version" || arg == "-v" {
+ case "--version", "-v":
// redirect --version pseudo-command to actual command
arg = "version"
}
+
if contains(getBoolFlags(), arg) {
rootFlags = append(rootFlags, arg)
continue
}
- if contains(getStringFlags(), arg) {
- i++
- if i >= l {
- fmt.Fprintf(os.Stderr, "flag needs an argument: '%s'\n", arg)
- os.Exit(1)
+ for _, flag := range getStringFlags() {
+ if arg == flag {
+ i++
+ if i >= l {
+ fmt.Fprintf(os.Stderr, "flag needs an argument: '%s'\n", arg)
+ os.Exit(1)
+ }
+ rootFlags = append(rootFlags, arg, args[i])
+ continue ARGS
+ }
+ if strings.HasPrefix(arg, flag) {
+ _, val, found := strings.Cut(arg, "=")
+ if found {
+ rootFlags = append(rootFlags, flag, val)
+ continue ARGS
+ }
}
- rootFlags = append(rootFlags, arg, args[i])
- continue
}
command = append(command, arg)
}
diff --git a/cmd/compatibility/convert_test.go b/cmd/compatibility/convert_test.go
index 68fc66de7a4..ae01665e92a 100644
--- a/cmd/compatibility/convert_test.go
+++ b/cmd/compatibility/convert_test.go
@@ -17,6 +17,9 @@
package compatibility
import (
+ "errors"
+ "os"
+ "os/exec"
"testing"
"gotest.tools/v3/assert"
@@ -24,9 +27,10 @@ import (
func Test_convert(t *testing.T) {
tests := []struct {
- name string
- args []string
- want []string
+ name string
+ args []string
+ want []string
+ wantErr bool
}{
{
name: "compose only",
@@ -38,16 +42,31 @@ func Test_convert(t *testing.T) {
args: []string{"--context", "foo", "-f", "compose.yaml", "up"},
want: []string{"--context", "foo", "compose", "-f", "compose.yaml", "up"},
},
+ {
+ name: "with context arg",
+ args: []string{"--context=foo", "-f", "compose.yaml", "up"},
+ want: []string{"--context", "foo", "compose", "-f", "compose.yaml", "up"},
+ },
{
name: "with host",
args: []string{"--host", "tcp://1.2.3.4", "up"},
want: []string{"--host", "tcp://1.2.3.4", "compose", "up"},
},
+ {
+ name: "compose --verbose",
+ args: []string{"--verbose"},
+ want: []string{"--debug", "compose"},
+ },
{
name: "compose --version",
args: []string{"--version"},
want: []string{"compose", "version"},
},
+ {
+ name: "compose -v",
+ args: []string{"-v"},
+ want: []string{"compose", "version"},
+ },
{
name: "help",
args: []string{"-h"},
@@ -68,11 +87,46 @@ func Test_convert(t *testing.T) {
args: []string{"--log-level", "INFO", "up"},
want: []string{"--log-level", "INFO", "compose", "up"},
},
+ {
+ name: "empty string argument",
+ args: []string{"--project-directory", "", "ps"},
+ want: []string{"compose", "--project-directory", "", "ps"},
+ },
+ {
+ name: "compose as project name",
+ args: []string{"--project-name", "compose", "down", "--remove-orphans"},
+ want: []string{"compose", "--project-name", "compose", "down", "--remove-orphans"},
+ },
+ {
+ name: "completion command",
+ args: []string{"__complete", "up"},
+ want: []string{"__complete", "compose", "up"},
+ },
+ {
+ name: "string flag without argument",
+ args: []string{"--log-level"},
+ wantErr: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got := Convert(tt.args)
- assert.DeepEqual(t, tt.want, got)
+ if tt.wantErr {
+ if os.Getenv("BE_CRASHER") == "1" {
+ Convert(tt.args)
+ return
+ }
+ cmd := exec.Command(os.Args[0], "-test.run=^"+t.Name()+"$")
+ cmd.Env = append(os.Environ(), "BE_CRASHER=1")
+ err := cmd.Run()
+ var e *exec.ExitError
+ if errors.As(err, &e) && !e.Success() {
+ return
+ }
+ t.Fatalf("process ran with err %v, want exit status 1", err)
+ } else {
+ got := Convert(tt.args)
+ assert.DeepEqual(t, tt.want, got)
+ }
})
}
}
diff --git a/cmd/compose/alpha.go b/cmd/compose/alpha.go
new file mode 100644
index 00000000000..8acc969ca30
--- /dev/null
+++ b/cmd/compose/alpha.go
@@ -0,0 +1,37 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "github.com/docker/cli/cli/command"
+ "github.com/spf13/cobra"
+)
+
+// alphaCommand groups all experimental subcommands
+func alphaCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ cmd := &cobra.Command{
+ Short: "Experimental commands",
+ Use: "alpha [COMMAND]",
+ Hidden: true,
+ Annotations: map[string]string{
+ "experimentalCLI": "true",
+ },
+ }
+ cmd.AddCommand(
+ vizCommand(p, dockerCli, backendOptions),
+ publishCommand(p, dockerCli, backendOptions),
+ generateCommand(p, dockerCli, backendOptions),
+ )
+ return cmd
+}
diff --git a/cmd/compose/attach.go b/cmd/compose/attach.go
new file mode 100644
index 00000000000..bef1b465722
--- /dev/null
+++ b/cmd/compose/attach.go
@@ -0,0 +1,85 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type attachOpts struct {
+ *composeOptions
+
+ service string
+ index int
+
+ detachKeys string
+ noStdin bool
+ proxy bool
+}
+
+func attachCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := attachOpts{
+ composeOptions: &composeOptions{
+ ProjectOptions: p,
+ },
+ }
+ runCmd := &cobra.Command{
+ Use: "attach [OPTIONS] SERVICE",
+ Short: "Attach local standard input, output, and error streams to a service's running container",
+ Args: cobra.MinimumNArgs(1),
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ opts.service = args[0]
+ return nil
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runAttach(ctx, dockerCli, backendOptions, opts)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+
+ runCmd.Flags().IntVar(&opts.index, "index", 0, "Index of the container if the service has multiple replicas.")
+ runCmd.Flags().StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching from a container.")
+
+ runCmd.Flags().BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN")
+ runCmd.Flags().BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process")
+ return runCmd
+}
+
+func runAttach(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts attachOpts) error {
+ projectName, err := opts.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+
+ attachOpts := api.AttachOptions{
+ Service: opts.service,
+ Index: opts.index,
+ DetachKeys: opts.detachKeys,
+ NoStdin: opts.noStdin,
+ Proxy: opts.proxy,
+ }
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Attach(ctx, projectName, attachOpts)
+}
diff --git a/cmd/compose/bridge.go b/cmd/compose/bridge.go
new file mode 100644
index 00000000000..fcc780844bd
--- /dev/null
+++ b/cmd/compose/bridge.go
@@ -0,0 +1,155 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/distribution/reference"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/go-units"
+ "github.com/spf13/cobra"
+
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/bridge"
+ "github.com/docker/compose/v5/pkg/compose"
+)
+
+func bridgeCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "bridge CMD [OPTIONS]",
+ Short: "Convert compose files into another model",
+ TraverseChildren: true,
+ }
+ cmd.AddCommand(
+ convertCommand(p, dockerCli),
+ transformersCommand(dockerCli),
+ )
+ return cmd
+}
+
+func convertCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
+ convertOpts := bridge.ConvertOptions{}
+ cmd := &cobra.Command{
+ Use: "convert",
+ Short: "Convert compose files to Kubernetes manifests, Helm charts, or another model",
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runConvert(ctx, dockerCli, p, convertOpts)
+ }),
+ }
+ flags := cmd.Flags()
+ flags.StringVarP(&convertOpts.Output, "output", "o", "out", "The output directory for the Kubernetes resources")
+ flags.StringArrayVarP(&convertOpts.Transformations, "transformation", "t", nil, "Transformation to apply to compose model (default: docker/compose-bridge-kubernetes)")
+ flags.StringVar(&convertOpts.Templates, "templates", "", "Directory containing transformation templates")
+ return cmd
+}
+
+func runConvert(ctx context.Context, dockerCli command.Cli, p *ProjectOptions, opts bridge.ConvertOptions) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := p.ToProject(ctx, dockerCli, backend, nil)
+ if err != nil {
+ return err
+ }
+ return bridge.Convert(ctx, dockerCli, project, opts)
+}
+
+func transformersCommand(dockerCli command.Cli) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "transformations CMD [OPTIONS]",
+ Short: "Manage transformation images",
+ }
+ cmd.AddCommand(
+ listTransformersCommand(dockerCli),
+ createTransformerCommand(dockerCli),
+ )
+ return cmd
+}
+
+func listTransformersCommand(dockerCli command.Cli) *cobra.Command {
+ options := lsOptions{}
+ cmd := &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List available transformations",
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ transformers, err := bridge.ListTransformers(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+ return displayTransformer(dockerCli, transformers, options)
+ }),
+ }
+ cmd.Flags().StringVar(&options.Format, "format", "table", "Format the output. Values: [table | json]")
+ cmd.Flags().BoolVarP(&options.Quiet, "quiet", "q", false, "Only display transformer names")
+ return cmd
+}
+
+func displayTransformer(dockerCli command.Cli, transformers []image.Summary, options lsOptions) error {
+ if options.Quiet {
+ for _, t := range transformers {
+ if len(t.RepoTags) > 0 {
+ _, _ = fmt.Fprintln(dockerCli.Out(), t.RepoTags[0])
+ } else {
+ _, _ = fmt.Fprintln(dockerCli.Out(), t.ID)
+ }
+ }
+ return nil
+ }
+ return formatter.Print(transformers, options.Format, dockerCli.Out(),
+ func(w io.Writer) {
+ for _, img := range transformers {
+ id := stringid.TruncateID(img.ID)
+ size := units.HumanSizeWithPrecision(float64(img.Size), 3)
+ repo, tag := "", ""
+ if len(img.RepoTags) > 0 {
+ ref, err := reference.ParseDockerRef(img.RepoTags[0])
+ if err == nil {
+ // ParseDockerRef will reject a local image ID
+ repo = reference.FamiliarName(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ }
+
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", id, repo, tag, size)
+ }
+ },
+ "IMAGE ID", "REPO", "TAGS", "SIZE")
+}
+
+func createTransformerCommand(dockerCli command.Cli) *cobra.Command {
+ var opts bridge.CreateTransformerOptions
+ cmd := &cobra.Command{
+ Use: "create [OPTION] PATH",
+ Short: "Create a new transformation",
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ opts.Dest = args[0]
+ return bridge.CreateTransformer(ctx, dockerCli, opts)
+ }),
+ }
+ cmd.Flags().StringVarP(&opts.From, "from", "f", "", "Existing transformation to copy (default: docker/compose-bridge-kubernetes)")
+ return cmd
+}
diff --git a/cmd/compose/build.go b/cmd/compose/build.go
index 298d3b67518..0a710a9eaeb 100644
--- a/cmd/compose/build.go
+++ b/cmd/compose/build.go
@@ -22,117 +22,157 @@ import (
"os"
"strings"
- "github.com/compose-spec/compose-go/cli"
- "github.com/compose-spec/compose-go/loader"
- "github.com/compose-spec/compose-go/types"
- buildx "github.com/docker/buildx/util/progress"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ cliopts "github.com/docker/cli/opts"
+ "github.com/docker/compose/v5/cmd/display"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type buildOptions struct {
- *projectOptions
- composeOptions
- quiet bool
- pull bool
- progress string
- args []string
- noCache bool
- memory string
- ssh string
+ *ProjectOptions
+ quiet bool
+ pull bool
+ push bool
+ args []string
+ noCache bool
+ memory cliopts.MemBytes
+ ssh string
+ builder string
+ deps bool
+ print bool
+ check bool
+ sbom string
+ provenance string
}
func (opts buildOptions) toAPIBuildOptions(services []string) (api.BuildOptions, error) {
var SSHKeys []types.SSHKey
- var err error
if opts.ssh != "" {
- SSHKeys, err = loader.ParseShortSSHSyntax(opts.ssh)
- if err != nil {
- return api.BuildOptions{}, err
+ id, path, found := strings.Cut(opts.ssh, "=")
+ if !found && id != "default" {
+ return api.BuildOptions{}, fmt.Errorf("invalid ssh key %q", opts.ssh)
}
+ SSHKeys = append(SSHKeys, types.SSHKey{
+ ID: id,
+ Path: path,
+ })
+ }
+ builderName := opts.builder
+ if builderName == "" {
+ builderName = os.Getenv("BUILDX_BUILDER")
+ }
+
+ uiMode := display.Mode
+ if uiMode == display.ModeJSON {
+ uiMode = "rawjson"
}
return api.BuildOptions{
- Pull: opts.pull,
- Progress: opts.progress,
- Args: types.NewMappingWithEquals(opts.args),
- NoCache: opts.noCache,
- Quiet: opts.quiet,
- Services: services,
- SSHs: SSHKeys,
+ Pull: opts.pull,
+ Push: opts.push,
+ Progress: uiMode,
+ Args: types.NewMappingWithEquals(opts.args),
+ NoCache: opts.noCache,
+ Quiet: opts.quiet,
+ Services: services,
+ Deps: opts.deps,
+ Memory: int64(opts.memory),
+ Print: opts.print,
+ Check: opts.check,
+ SSHs: SSHKeys,
+ Builder: builderName,
+ SBOM: opts.sbom,
+ Provenance: opts.provenance,
}, nil
}
-var printerModes = []string{
- buildx.PrinterModeAuto,
- buildx.PrinterModeTty,
- buildx.PrinterModePlain,
- buildx.PrinterModeQuiet,
-}
-
-func buildCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func buildCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := buildOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "build [SERVICE...]",
+ Use: "build [OPTIONS] [SERVICE...]",
Short: "Build or rebuild services",
PreRunE: Adapt(func(ctx context.Context, args []string) error {
- if opts.memory != "" {
- fmt.Println("WARNING --memory is ignored as not supported in buildkit.")
- }
if opts.quiet {
- opts.progress = buildx.PrinterModeQuiet
+ display.Mode = display.ModeQuiet
devnull, err := os.Open(os.DevNull)
if err != nil {
return err
}
os.Stdout = devnull
}
- if !utils.StringContains(printerModes, opts.progress) {
- return fmt.Errorf("unsupported --progress value %q", opts.progress)
- }
return nil
}),
RunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("ssh") && opts.ssh == "" {
opts.ssh = "default"
}
- return runBuild(ctx, backend, opts, args)
+ if cmd.Flags().Changed("progress") && opts.ssh == "" {
+ fmt.Fprint(os.Stderr, "--progress is a global compose flag; use `docker compose --progress <value> build ...` instead\n")
+ }
+ return runBuild(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
- cmd.Flags().BoolVarP(&opts.quiet, "quiet", "q", false, "Don't print anything to STDOUT")
- cmd.Flags().BoolVar(&opts.pull, "pull", false, "Always attempt to pull a newer version of the image.")
- cmd.Flags().StringVar(&opts.progress, "progress", buildx.PrinterModeAuto, fmt.Sprintf(`Set type of progress output (%s)`, strings.Join(printerModes, ", ")))
- cmd.Flags().StringArrayVar(&opts.args, "build-arg", []string{}, "Set build-time variables for services.")
- cmd.Flags().StringVar(&opts.ssh, "ssh", "", "Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent)")
- cmd.Flags().Bool("parallel", true, "Build images in parallel. DEPRECATED")
- cmd.Flags().MarkHidden("parallel") //nolint:errcheck
- cmd.Flags().Bool("compress", true, "Compress the build context using gzip. DEPRECATED")
- cmd.Flags().MarkHidden("compress") //nolint:errcheck
- cmd.Flags().Bool("force-rm", true, "Always remove intermediate containers. DEPRECATED")
- cmd.Flags().MarkHidden("force-rm") //nolint:errcheck
- cmd.Flags().BoolVar(&opts.noCache, "no-cache", false, "Do not use cache when building the image")
- cmd.Flags().Bool("no-rm", false, "Do not remove intermediate containers after a successful build. DEPRECATED")
- cmd.Flags().MarkHidden("no-rm") //nolint:errcheck
- cmd.Flags().StringVarP(&opts.memory, "memory", "m", "", "Set memory limit for the build container. Not supported on buildkit yet.")
- cmd.Flags().MarkHidden("memory") //nolint:errcheck
+ flags := cmd.Flags()
+ flags.BoolVar(&opts.push, "push", false, "Push service images")
+ flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the build output")
+ flags.BoolVar(&opts.pull, "pull", false, "Always attempt to pull a newer version of the image")
+ flags.StringArrayVar(&opts.args, "build-arg", []string{}, "Set build-time variables for services")
+ flags.StringVar(&opts.ssh, "ssh", "", "Set SSH authentications used when building service images. (use 'default' to use your default SSH agent)")
+ flags.StringVar(&opts.builder, "builder", "", "Set builder to use")
+ flags.BoolVar(&opts.deps, "with-dependencies", false, "Also build dependencies (transitively)")
+ flags.StringVar(&opts.provenance, "provenance", "", `Add a provenance attestation`)
+ flags.StringVar(&opts.sbom, "sbom", "", `Add an SBOM attestation`)
+
+ flags.Bool("parallel", true, "Build images in parallel. DEPRECATED")
+ flags.MarkHidden("parallel") //nolint:errcheck
+ flags.Bool("compress", true, "Compress the build context using gzip. DEPRECATED")
+ flags.MarkHidden("compress") //nolint:errcheck
+ flags.Bool("force-rm", true, "Always remove intermediate containers. DEPRECATED")
+ flags.MarkHidden("force-rm") //nolint:errcheck
+ flags.BoolVar(&opts.noCache, "no-cache", false, "Do not use cache when building the image")
+ flags.Bool("no-rm", false, "Do not remove intermediate containers after a successful build. DEPRECATED")
+ flags.MarkHidden("no-rm") //nolint:errcheck
+ flags.VarP(&opts.memory, "memory", "m", "Set memory limit for the build container. Not supported by BuildKit.")
+ flags.StringVar(&p.Progress, "progress", "", fmt.Sprintf(`Set type of ui output (%s)`, strings.Join(printerModes, ", ")))
+ flags.MarkHidden("progress") //nolint:errcheck
+ flags.BoolVar(&opts.print, "print", false, "Print equivalent bake file")
+ flags.BoolVar(&opts.check, "check", false, "Check build configuration")
return cmd
}
-func runBuild(ctx context.Context, backend api.Service, opts buildOptions, services []string) error {
- project, err := opts.toProject(services, cli.WithResolvedPaths(true))
+func runBuild(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts buildOptions, services []string) error {
+ if opts.print {
+ backendOptions.Add(compose.WithEventProcessor(display.Quiet()))
+ }
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ opts.All = true // do not drop resources as build may involve some dependencies by additional_contexts
+ project, _, err := opts.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
if err != nil {
return err
}
+ if err := applyPlatforms(project, false); err != nil {
+ return err
+ }
+
apiBuildOptions, err := opts.toAPIBuildOptions(services)
if err != nil {
return err
}
+ apiBuildOptions.Attestations = true
+
return backend.Build(ctx, project, apiBuildOptions)
}
diff --git a/cmd/compose/commit.go b/cmd/compose/commit.go
new file mode 100644
index 00000000000..ede09dc95e6
--- /dev/null
+++ b/cmd/compose/commit.go
@@ -0,0 +1,96 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/opts"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type commitOptions struct {
+ *ProjectOptions
+
+ service string
+ reference string
+
+ pause bool
+ comment string
+ author string
+ changes opts.ListOpts
+
+ index int
+}
+
+func commitCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ options := commitOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "commit [OPTIONS] SERVICE [REPOSITORY[:TAG]]",
+ Short: "Create a new image from a service container's changes",
+ Args: cobra.RangeArgs(1, 2),
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ options.service = args[0]
+ if len(args) > 1 {
+ options.reference = args[1]
+ }
+
+ return nil
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runCommit(ctx, dockerCli, backendOptions, options)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+
+ flags := cmd.Flags()
+ flags.IntVar(&options.index, "index", 0, "Index of the container if the service has multiple replicas.")
+
+ flags.BoolVarP(&options.pause, "pause", "p", true, "Pause container during commit")
+ flags.StringVarP(&options.comment, "message", "m", "", "Commit message")
+ flags.StringVarP(&options.author, "author", "a", "", `Author (e.g., "John Hannibal Smith ")`)
+ options.changes = opts.NewListOpts(nil)
+ flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
+
+ return cmd
+}
+
+func runCommit(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, options commitOptions) error {
+ projectName, err := options.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Commit(ctx, projectName, api.CommitOptions{
+ Service: options.service,
+ Reference: options.reference,
+ Pause: options.pause,
+ Comment: options.comment,
+ Author: options.author,
+ Changes: options.changes,
+ Index: options.index,
+ })
+}
diff --git a/cmd/compose/completion.go b/cmd/compose/completion.go
index 891539b6b7c..14266a49060 100644
--- a/cmd/compose/completion.go
+++ b/cmd/compose/completion.go
@@ -17,8 +17,12 @@
package compose
import (
+ "sort"
"strings"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
)
@@ -27,22 +31,88 @@ type validArgsFn func(cmd *cobra.Command, args []string, toComplete string) ([]s
func noCompletion() validArgsFn {
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- return nil, cobra.ShellCompDirectiveNoFileComp
+ return []string{}, cobra.ShellCompDirectiveNoSpace
}
}
-func serviceCompletion(p *projectOptions) validArgsFn {
+func completeServiceNames(dockerCli command.Cli, p *ProjectOptions) validArgsFn {
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
- project, err := p.toProject(nil)
+ p.Offline = true
+ backend, err := compose.NewComposeService(dockerCli)
if err != nil {
return nil, cobra.ShellCompDirectiveNoFileComp
}
- var serviceNames []string
- for _, s := range project.ServiceNames() {
+
+ project, _, err := p.ToProject(cmd.Context(), dockerCli, backend, nil)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ var values []string
+ serviceNames := append(project.ServiceNames(), project.DisabledServiceNames()...)
+ for _, s := range serviceNames {
if toComplete == "" || strings.HasPrefix(s, toComplete) {
- serviceNames = append(serviceNames, s)
+ values = append(values, s)
+ }
+ }
+ return values, cobra.ShellCompDirectiveNoFileComp
+ }
+}
+
+func completeProjectNames(dockerCli command.Cli, backendOptions *BackendOptions) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+
+ list, err := backend.List(cmd.Context(), api.ListOptions{
+ All: true,
+ })
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveError
+ }
+ var values []string
+ for _, stack := range list {
+ if strings.HasPrefix(stack.Name, toComplete) {
+ values = append(values, stack.Name)
}
}
- return serviceNames, cobra.ShellCompDirectiveNoFileComp
+ return values, cobra.ShellCompDirectiveNoFileComp
+ }
+}
+
+func completeProfileNames(dockerCli command.Cli, p *ProjectOptions) validArgsFn {
+ return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ p.Offline = true
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+
+ project, _, err := p.ToProject(cmd.Context(), dockerCli, backend, nil)
+ if err != nil {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+
+ allProfileNames := project.AllServices().GetProfiles()
+ sort.Strings(allProfileNames)
+
+ var values []string
+ for _, profileName := range allProfileNames {
+ if strings.HasPrefix(profileName, toComplete) {
+ values = append(values, profileName)
+ }
+ }
+ return values, cobra.ShellCompDirectiveNoFileComp
+ }
+}
+
+func completeScaleArgs(cli command.Cli, p *ProjectOptions) cobra.CompletionFunc {
+ return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ completions, directive := completeServiceNames(cli, p)(cmd, args, toComplete)
+ for i, completion := range completions {
+ completions[i] = completion + "="
+ }
+ return completions, directive
}
}
diff --git a/cmd/compose/compose.go b/cmd/compose/compose.go
index c8596395f60..fef0ed45f6e 100644
--- a/cmd/compose/compose.go
+++ b/cmd/compose/compose.go
@@ -18,31 +18,84 @@ package compose
import (
"context"
+ "encoding/json"
+ "errors"
"fmt"
+ "io"
"os"
"os/signal"
"path/filepath"
+ "strconv"
"strings"
"syscall"
- "github.com/compose-spec/compose-go/cli"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/dotenv"
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/compose-spec/compose-go/v2/types"
+ composegoutils "github.com/compose-spec/compose-go/v2/utils"
+ "github.com/docker/buildx/util/logutil"
dockercli "github.com/docker/cli/cli"
- "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/cli/cli-plugins/metadata"
"github.com/docker/cli/cli/command"
+ "github.com/docker/cli/pkg/kvfile"
+ "github.com/docker/compose/v5/cmd/display"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/docker/compose/v5/pkg/remote"
+ "github.com/docker/compose/v5/pkg/utils"
"github.com/morikuni/aec"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+)
- "github.com/docker/compose/v2/cmd/formatter"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/compose"
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/utils"
+const (
+ // ComposeParallelLimit sets the limit on concurrent operations against the Docker Engine
+ ComposeParallelLimit = "COMPOSE_PARALLEL_LIMIT"
+ // ComposeProjectName defines the project name to be used, instead of guessing it from the parent directory
+ ComposeProjectName = "COMPOSE_PROJECT_NAME"
+ // ComposeCompatibility tries to mimic compose v1 as much as possible
+ ComposeCompatibility = "COMPOSE_COMPATIBILITY"
+ // ComposeRemoveOrphans removes "orphaned" containers, i.e. containers tagged for the current project but not declared as services
+ ComposeRemoveOrphans = "COMPOSE_REMOVE_ORPHANS"
+ // ComposeIgnoreOrphans ignores "orphaned" containers
+ ComposeIgnoreOrphans = "COMPOSE_IGNORE_ORPHANS"
+ // ComposeEnvFiles defines the env files to use if --env-file isn't used
+ ComposeEnvFiles = "COMPOSE_ENV_FILES"
+ // ComposeMenu defines whether the navigation menu should be rendered. Can also be set via --menu
+ ComposeMenu = "COMPOSE_MENU"
+ // ComposeProgress defines the type of progress output if --progress isn't used
+ ComposeProgress = "COMPOSE_PROGRESS"
)
+// rawEnv loads a dotenv file using the docker/cli key=value parser, without attempting to interpolate or evaluate values
+func rawEnv(r io.Reader, filename string, vars map[string]string, lookup func(key string) (string, bool)) error {
+ lines, err := kvfile.ParseFromReader(r, lookup)
+ if err != nil {
+ return fmt.Errorf("failed to parse env_file %s: %w", filename, err)
+ }
+ for _, line := range lines {
+ key, value, _ := strings.Cut(line, "=")
+ vars[key] = value
+ }
+ return nil
+}
+
+var stdioToStdout bool
+
+func init() {
+ // compose evaluates env file values for interpolation
+ // the `raw` format allows loading an env_file with the same parser used by `docker run --env-file`
+ dotenv.RegisterFormat("raw", rawEnv)
+
+ if v, ok := os.LookupEnv("COMPOSE_STATUS_STDOUT"); ok {
+ stdioToStdout, _ = strconv.ParseBool(v)
+ }
+}
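+
+// Illustrative use of the "raw" format registered above (assumes the env_file
+// long syntax with a `format` attribute), e.g. in a compose file:
+//
+//	services:
+//	  app:
+//	    env_file:
+//	      - path: ./server.env
+//	        format: raw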
+
// Command defines a compose CLI command as a func with args
type Command func(context.Context, []string) error
@@ -52,31 +105,25 @@ type CobraCommand func(context.Context, *cobra.Command, []string) error
// AdaptCmd adapt a CobraCommand func to cobra library
func AdaptCmd(fn CobraCommand) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
- ctx := cmd.Context()
- contextString := fmt.Sprintf("%s", ctx)
- if !strings.HasSuffix(contextString, ".WithCancel") { // need to handle cancel
- cancellableCtx, cancel := context.WithCancel(cmd.Context())
- ctx = cancellableCtx
- s := make(chan os.Signal, 1)
- signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
- go func() {
- <-s
- cancel()
- }()
- }
+ ctx, cancel := context.WithCancel(cmd.Context())
+
+ s := make(chan os.Signal, 1)
+ signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
+ go func() {
+ <-s
+ cancel()
+ signal.Stop(s)
+ close(s)
+ }()
+
err := fn(ctx, cmd, args)
- var composeErr compose.Error
if api.IsErrCanceled(err) || errors.Is(ctx.Err(), context.Canceled) {
err = dockercli.StatusError{
StatusCode: 130,
- Status: compose.CanceledStatus,
}
}
- if errors.As(err, &composeErr) {
- err = dockercli.StatusError{
- StatusCode: composeErr.GetMetricsFailureCategory().ExitCode,
- Status: err.Error(),
- }
+ if display.Mode == display.ModeJSON {
+ err = makeJSONError(err)
}
return err
}
@@ -89,14 +136,18 @@ func Adapt(fn Command) func(cmd *cobra.Command, args []string) error {
})
}
-type projectOptions struct {
- ProjectName string
- Profiles []string
- ConfigPaths []string
- WorkDir string
- ProjectDir string
- EnvFile string
- Compatibility bool
+type ProjectOptions struct {
+ ProjectName string
+ Profiles []string
+ ConfigPaths []string
+ WorkDir string
+ ProjectDir string
+ EnvFiles []string
+ Compatibility bool
+ Progress string
+ Offline bool
+ All bool
+ insecureRegistries []string
}
// ProjectFunc does stuff within a types.Project
@@ -106,116 +157,250 @@ type ProjectFunc func(ctx context.Context, project *types.Project) error
type ProjectServicesFunc func(ctx context.Context, project *types.Project, services []string) error
// WithProject creates a cobra run command from a ProjectFunc based on configured project options and selected services
-func (o *projectOptions) WithProject(fn ProjectFunc) func(cmd *cobra.Command, args []string) error {
- return o.WithServices(func(ctx context.Context, project *types.Project, services []string) error {
+func (o *ProjectOptions) WithProject(fn ProjectFunc, dockerCli command.Cli) func(cmd *cobra.Command, args []string) error {
+ return o.WithServices(dockerCli, func(ctx context.Context, project *types.Project, services []string) error {
return fn(ctx, project)
})
}
// WithServices creates a cobra run command from a ProjectFunc based on configured project options and selected services
-func (o *projectOptions) WithServices(fn ProjectServicesFunc) func(cmd *cobra.Command, args []string) error {
- return Adapt(func(ctx context.Context, args []string) error {
- project, err := o.toProject(args, cli.WithResolvedPaths(true))
+func (o *ProjectOptions) WithServices(dockerCli command.Cli, fn ProjectServicesFunc) func(cmd *cobra.Command, args []string) error {
+ return Adapt(func(ctx context.Context, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli)
if err != nil {
return err
}
- return fn(ctx, project, args)
+ project, metrics, err := o.ToProject(ctx, dockerCli, backend, services, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ ctx = context.WithValue(ctx, tracing.MetricsKey{}, metrics)
+
+ project, err = project.WithServicesEnvironmentResolved(true)
+ if err != nil {
+ return err
+ }
+
+ return fn(ctx, project, services)
})
}
-func (o *projectOptions) addProjectFlags(f *pflag.FlagSet) {
+type jsonErrorData struct {
+ Error bool `json:"error,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func errorAsJSON(message string) string {
+ errorMessage := &jsonErrorData{
+ Error: true,
+ Message: message,
+ }
+ marshal, err := json.Marshal(errorMessage)
+ if err == nil {
+ return string(marshal)
+ } else {
+ return message
+ }
+}
+
+func makeJSONError(err error) error {
+ if err == nil {
+ return nil
+ }
+ var statusErr dockercli.StatusError
+ if errors.As(err, &statusErr) {
+ return dockercli.StatusError{
+ StatusCode: statusErr.StatusCode,
+ Status: errorAsJSON(statusErr.Status),
+ }
+ }
+ return fmt.Errorf("%s", errorAsJSON(err.Error()))
+}
+
+func (o *ProjectOptions) addProjectFlags(f *pflag.FlagSet) {
f.StringArrayVar(&o.Profiles, "profile", []string{}, "Specify a profile to enable")
f.StringVarP(&o.ProjectName, "project-name", "p", "", "Project name")
f.StringArrayVarP(&o.ConfigPaths, "file", "f", []string{}, "Compose configuration files")
- f.StringVar(&o.EnvFile, "env-file", "", "Specify an alternate environment file.")
- f.StringVar(&o.ProjectDir, "project-directory", "", "Specify an alternate working directory\n(default: the path of the Compose file)")
- f.StringVar(&o.WorkDir, "workdir", "", "DEPRECATED! USE --project-directory INSTEAD.\nSpecify an alternate working directory\n(default: the path of the Compose file)")
+ f.StringArrayVar(&o.insecureRegistries, "insecure-registry", []string{}, "Use insecure registry to pull Compose OCI artifacts. Doesn't apply to images")
+ _ = f.MarkHidden("insecure-registry")
+ f.StringArrayVar(&o.EnvFiles, "env-file", defaultStringArrayVar(ComposeEnvFiles), "Specify an alternate environment file")
+ f.StringVar(&o.ProjectDir, "project-directory", "", "Specify an alternate working directory\n(default: the path of the first specified Compose file)")
+ f.StringVar(&o.WorkDir, "workdir", "", "DEPRECATED! USE --project-directory INSTEAD.\nSpecify an alternate working directory\n(default: the path of the first specified Compose file)")
f.BoolVar(&o.Compatibility, "compatibility", false, "Run compose in backward compatibility mode")
+ f.StringVar(&o.Progress, "progress", os.Getenv(ComposeProgress), fmt.Sprintf(`Set type of progress output (%s)`, strings.Join(printerModes, ", ")))
+ f.BoolVar(&o.All, "all-resources", false, "Include all resources, even those not used by services")
_ = f.MarkHidden("workdir")
}
-func (o *projectOptions) toProjectName() (string, error) {
+// defaultStringArrayVar returns the default value for a command-line flag from a comma-separated value in an environment variable
+func defaultStringArrayVar(env string) []string {
+ return strings.FieldsFunc(os.Getenv(env), func(c rune) bool {
+ return c == ','
+ })
+}
+
+func (o *ProjectOptions) projectOrName(ctx context.Context, dockerCli command.Cli, services ...string) (*types.Project, string, error) {
+ name := o.ProjectName
+ var project *types.Project
+ if len(o.ConfigPaths) > 0 || o.ProjectName == "" {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return nil, "", err
+ }
+
+ p, _, err := o.ToProject(ctx, dockerCli, backend, services, cli.WithDiscardEnvFile, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ envProjectName := os.Getenv(ComposeProjectName)
+ if envProjectName != "" {
+ return nil, envProjectName, nil
+ }
+ return nil, "", err
+ }
+ project = p
+ name = p.Name
+ }
+ return project, name, nil
+}
+
+func (o *ProjectOptions) toProjectName(ctx context.Context, dockerCli command.Cli) (string, error) {
if o.ProjectName != "" {
return o.ProjectName, nil
}
- envProjectName := os.Getenv("COMPOSE_PROJECT_NAME")
+ envProjectName := os.Getenv(ComposeProjectName)
if envProjectName != "" {
return envProjectName, nil
}
- project, err := o.toProject(nil)
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return "", err
+ }
+
+ project, _, err := o.ToProject(ctx, dockerCli, backend, nil)
if err != nil {
return "", err
}
return project.Name, nil
}
-func (o *projectOptions) toProject(services []string, po ...cli.ProjectOptionsFn) (*types.Project, error) {
- options, err := o.toProjectOptions(po...)
- if err != nil {
- return nil, compose.WrapComposeError(err)
+func (o *ProjectOptions) ToModel(ctx context.Context, dockerCli command.Cli, services []string, po ...cli.ProjectOptionsFn) (map[string]any, error) {
+ remotes := o.remoteLoaders(dockerCli)
+ for _, r := range remotes {
+ po = append(po, cli.WithResourceLoader(r))
}
- project, err := cli.ProjectFromOptions(options)
+ options, err := o.toProjectOptions(po...)
if err != nil {
- return nil, compose.WrapComposeError(err)
+ return nil, err
}
- if o.Compatibility || utils.StringToBool(project.Environment["COMPOSE_COMPATIBILITY"]) {
- compose.Separator = "_"
+ if o.Compatibility || utils.StringToBool(options.Environment[ComposeCompatibility]) {
+ api.Separator = "_"
}
- ef := o.EnvFile
- if ef != "" && !filepath.IsAbs(ef) {
- ef = filepath.Join(project.WorkingDir, o.EnvFile)
- }
- for i, s := range project.Services {
- s.CustomLabels = map[string]string{
- api.ProjectLabel: project.Name,
- api.ServiceLabel: s.Name,
- api.VersionLabel: api.ComposeVersion,
- api.WorkingDirLabel: project.WorkingDir,
- api.ConfigFilesLabel: strings.Join(project.ComposeFiles, ","),
- api.OneoffLabel: "False", // default, will be overridden by `run` command
- }
- if ef != "" {
- s.CustomLabels[api.EnvironmentFileLabel] = ef
- }
- project.Services[i] = s
- }
+ return options.LoadModel(ctx)
+}
- if len(services) > 0 {
- s, err := project.GetServices(services...)
- if err != nil {
- return nil, err
+// ToProject loads a Compose project using the LoadProject API.
+// Accepts optional cli.ProjectOptionsFn to control loader behavior.
+func (o *ProjectOptions) ToProject(ctx context.Context, dockerCli command.Cli, backend api.Compose, services []string, po ...cli.ProjectOptionsFn) (*types.Project, tracing.Metrics, error) {
+ var metrics tracing.Metrics
+ remotes := o.remoteLoaders(dockerCli)
+
+	// Set up a metrics listener to collect project data
+ metricsListener := func(event string, metadata map[string]any) {
+ switch event {
+ case "extends":
+ metrics.CountExtends++
+ case "include":
+ paths := metadata["path"].(types.StringList)
+ for _, path := range paths {
+ var isRemote bool
+ for _, r := range remotes {
+ if r.Accept(path) {
+ isRemote = true
+ break
+ }
+ }
+ if isRemote {
+ metrics.CountIncludesRemote++
+ } else {
+ metrics.CountIncludesLocal++
+ }
+ }
}
- o.Profiles = append(o.Profiles, s.GetProfiles()...)
}
- if profiles, ok := options.Environment["COMPOSE_PROFILES"]; ok {
- o.Profiles = append(o.Profiles, strings.Split(profiles, ",")...)
+ loadOpts := api.ProjectLoadOptions{
+ ProjectName: o.ProjectName,
+ ConfigPaths: o.ConfigPaths,
+ WorkingDir: o.ProjectDir,
+ EnvFiles: o.EnvFiles,
+ Profiles: o.Profiles,
+ Services: services,
+ Offline: o.Offline,
+ All: o.All,
+ Compatibility: o.Compatibility,
+ ProjectOptionsFns: po,
+ LoadListeners: []api.LoadListener{metricsListener},
+ OCI: api.OCIOptions{
+ InsecureRegistries: o.insecureRegistries,
+ },
}
- project.ApplyProfiles(o.Profiles)
+ project, err := backend.LoadProject(ctx, loadOpts)
+ if err != nil {
+ return nil, metrics, err
+ }
- project.WithoutUnnecessaryResources()
+ return project, metrics, nil
+}
- err = project.ForServices(services)
- return project, err
+func (o *ProjectOptions) remoteLoaders(dockerCli command.Cli) []loader.ResourceLoader {
+ if o.Offline {
+ return nil
+ }
+ git := remote.NewGitRemoteLoader(dockerCli, o.Offline)
+ oci := remote.NewOCIRemoteLoader(dockerCli, o.Offline, api.OCIOptions{})
+ return []loader.ResourceLoader{git, oci}
}
-func (o *projectOptions) toProjectOptions(po ...cli.ProjectOptionsFn) (*cli.ProjectOptions, error) {
- return cli.NewProjectOptions(o.ConfigPaths,
- append(po,
- cli.WithWorkingDirectory(o.ProjectDir),
- cli.WithEnvFile(o.EnvFile),
- cli.WithDotEnv,
- cli.WithOsEnv,
- cli.WithConfigFileEnv,
- cli.WithDefaultConfigPath,
- cli.WithName(o.ProjectName))...)
+func (o *ProjectOptions) toProjectOptions(po ...cli.ProjectOptionsFn) (*cli.ProjectOptions, error) {
+ opts := []cli.ProjectOptionsFn{
+ cli.WithWorkingDirectory(o.ProjectDir),
+		// First apply os.Environment, which always wins
+ cli.WithOsEnv,
+ }
+
+ if _, present := os.LookupEnv("PWD"); !present {
+ if pwd, err := os.Getwd(); err != nil {
+ return nil, err
+ } else {
+ opts = append(opts, cli.WithEnv([]string{"PWD=" + pwd}))
+ }
+ }
+
+ opts = append(opts,
+ // Load PWD/.env if present and no explicit --env-file has been set
+ cli.WithEnvFiles(o.EnvFiles...),
+ // read dot env file to populate project environment
+ cli.WithDotEnv,
+ // get compose file path set by COMPOSE_FILE
+ cli.WithConfigFileEnv,
+ // if none was selected, get default compose.yaml file from current dir or parent folder
+ cli.WithDefaultConfigPath,
+		// ... and then, a project directory != PWD may have been set, so load its .env file as well
+ cli.WithEnvFiles(o.EnvFiles...),
+ cli.WithDotEnv,
+		// COMPOSE_PROFILES may have been set by now (possibly by the .env files loaded above)
+ cli.WithDefaultProfiles(o.Profiles...),
+ cli.WithName(o.ProjectName),
+ )
+
+ return cli.NewProjectOptions(o.ConfigPaths, append(po, opts...)...)
}
// PluginName is the name of the plugin
@@ -223,33 +408,54 @@ const PluginName = "compose"
// RunningAsStandalone detects when running as a standalone program
func RunningAsStandalone() bool {
- return len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName && os.Args[1] != PluginName
+ return len(os.Args) < 2 || os.Args[1] != metadata.MetadataSubcommandName && os.Args[1] != PluginName
+}
+
+type BackendOptions struct {
+ Options []compose.Option
+}
+
+func (o *BackendOptions) Add(option compose.Option) {
+ o.Options = append(o.Options, option)
}
// RootCommand returns the compose command with its child commands
-func RootCommand(dockerCli command.Cli, backend api.Service) *cobra.Command {
- opts := projectOptions{}
+func RootCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command { //nolint:gocyclo
+ // filter out useless commandConn.CloseWrite warning message that can occur
+ // when using a remote context that is unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
+ // https://github.com/docker/cli/blob/e1f24d3c93df6752d3c27c8d61d18260f141310c/cli/connhelper/commandconn/commandconn.go#L203-L215
+ logrus.AddHook(logutil.NewFilter([]logrus.Level{
+ logrus.WarnLevel,
+ },
+ "commandConn.CloseWrite:",
+ "commandConn.CloseRead:",
+ ))
+
+ opts := ProjectOptions{}
var (
- ansi string
- noAnsi bool
- verbose bool
- version bool
+ ansi string
+ noAnsi bool
+ verbose bool
+ version bool
+ parallel int
+ dryRun bool
)
- command := &cobra.Command{
+ c := &cobra.Command{
Short: "Docker Compose",
+ Long: "Define and run multi-container applications with Docker",
Use: PluginName,
TraverseChildren: true,
- // By default (no Run/RunE in parent command) for typos in subcommands, cobra displays the help of parent command but exit(0) !
+		// By default (no Run/RunE in the parent command), cobra displays the parent command's help for typos in subcommands, but exits with 0!
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
if version {
- return versionCommand().Execute()
+ return versionCommand(dockerCli).Execute()
}
_ = cmd.Help()
return dockercli.StatusError{
- StatusCode: compose.CommandSyntaxFailure.ExitCode,
+ StatusCode: 1,
Status: fmt.Sprintf("unknown docker command: %q", "compose "+args[0]),
}
},
@@ -264,23 +470,77 @@ func RootCommand(dockerCli command.Cli, backend api.Service) *cobra.Command {
}
}
}
+
+ if verbose {
+ logrus.SetLevel(logrus.TraceLevel)
+ }
+
+ err := setEnvWithDotEnv(opts)
+ if err != nil {
+ return err
+ }
if noAnsi {
if ansi != "auto" {
return errors.New(`cannot specify DEPRECATED "--no-ansi" and "--ansi". Please use only "--ansi"`)
}
ansi = "never"
- fmt.Fprint(os.Stderr, aec.Apply("option '--no-ansi' is DEPRECATED ! Please use '--ansi' instead.\n", aec.RedF))
+ fmt.Fprint(os.Stderr, "option '--no-ansi' is DEPRECATED ! Please use '--ansi' instead.\n")
}
- if verbose {
- logrus.SetLevel(logrus.TraceLevel)
+ if v, ok := os.LookupEnv("COMPOSE_ANSI"); ok && !cmd.Flags().Changed("ansi") {
+ ansi = v
+ }
+ formatter.SetANSIMode(dockerCli, ansi)
+
+ if noColor, ok := os.LookupEnv("NO_COLOR"); ok && noColor != "" {
+ display.NoColor()
+ formatter.SetANSIMode(dockerCli, formatter.Never)
}
- formatter.SetANSIMode(ansi)
+
switch ansi {
case "never":
- progress.Mode = progress.ModePlain
- case "tty":
- progress.Mode = progress.ModeTTY
+ display.Mode = display.ModePlain
+ case "always":
+ display.Mode = display.ModeTTY
}
+
+ var ep api.EventProcessor
+ switch opts.Progress {
+ case "", display.ModeAuto:
+ switch {
+ case ansi == "never":
+ display.Mode = display.ModePlain
+ ep = display.Plain(dockerCli.Err())
+ case dockerCli.Out().IsTerminal():
+ ep = display.Full(dockerCli.Err(), stdinfo(dockerCli))
+ default:
+ ep = display.Plain(dockerCli.Err())
+ }
+ case display.ModeTTY:
+ if ansi == "never" {
+ return fmt.Errorf("can't use --progress tty while ANSI support is disabled")
+ }
+ display.Mode = display.ModeTTY
+ ep = display.Full(dockerCli.Err(), stdinfo(dockerCli))
+
+ case display.ModePlain:
+ if ansi == "always" {
+ return fmt.Errorf("can't use --progress plain while ANSI support is forced")
+ }
+ display.Mode = display.ModePlain
+ ep = display.Plain(dockerCli.Err())
+ case display.ModeQuiet, "none":
+ display.Mode = display.ModeQuiet
+ ep = display.Quiet()
+ case display.ModeJSON:
+ display.Mode = display.ModeJSON
+ logrus.SetFormatter(&logrus.JSONFormatter{})
+ ep = display.JSON(dockerCli.Err())
+ default:
+ return fmt.Errorf("unsupported --progress value %q", opts.Progress)
+ }
+ backendOptions.Add(compose.WithEventProcessor(ep))
+
+ // (4) options validation / normalization
if opts.WorkDir != "" {
if opts.ProjectDir != "" {
return errors.New(`cannot specify DEPRECATED "--workdir" and "--project-directory". Please use only "--project-directory" instead`)
@@ -288,45 +548,157 @@ func RootCommand(dockerCli command.Cli, backend api.Service) *cobra.Command {
opts.ProjectDir = opts.WorkDir
fmt.Fprint(os.Stderr, aec.Apply("option '--workdir' is DEPRECATED at root level! Please use '--project-directory' instead.\n", aec.RedF))
}
+ for i, file := range opts.EnvFiles {
+ if !filepath.IsAbs(file) {
+ file, err := filepath.Abs(file)
+ if err != nil {
+ return err
+ }
+ opts.EnvFiles[i] = file
+ }
+ }
+
+ composeCmd := cmd
+ for composeCmd.Name() != PluginName {
+ if !composeCmd.HasParent() {
+ return fmt.Errorf("error parsing command line, expected %q", PluginName)
+ }
+ composeCmd = composeCmd.Parent()
+ }
+
+ if v, ok := os.LookupEnv(ComposeParallelLimit); ok && !composeCmd.Flags().Changed("parallel") {
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ return fmt.Errorf("%s must be an integer (found: %q)", ComposeParallelLimit, v)
+ }
+ parallel = i
+ }
+ if parallel > 0 {
+ logrus.Debugf("Limiting max concurrency to %d jobs", parallel)
+ backendOptions.Add(compose.WithMaxConcurrency(parallel))
+ }
+
+ // dry run detection
+ if dryRun {
+ backendOptions.Add(compose.WithDryRun)
+ }
return nil
},
}
- command.AddCommand(
- upCommand(&opts, backend),
- downCommand(&opts, backend),
- startCommand(&opts, backend),
- restartCommand(&opts, backend),
- stopCommand(&opts, backend),
- psCommand(&opts, backend),
- listCommand(backend),
- logsCommand(&opts, backend),
- convertCommand(&opts, backend),
- killCommand(&opts, backend),
- runCommand(&opts, dockerCli, backend),
- removeCommand(&opts, backend),
- execCommand(&opts, dockerCli, backend),
- pauseCommand(&opts, backend),
- unpauseCommand(&opts, backend),
- topCommand(&opts, backend),
- eventsCommand(&opts, backend),
- portCommand(&opts, backend),
- imagesCommand(&opts, backend),
- versionCommand(),
- buildCommand(&opts, backend),
- pushCommand(&opts, backend),
- pullCommand(&opts, backend),
- createCommand(&opts, backend),
- copyCommand(&opts, backend),
+ c.AddCommand(
+ upCommand(&opts, dockerCli, backendOptions),
+ downCommand(&opts, dockerCli, backendOptions),
+ startCommand(&opts, dockerCli, backendOptions),
+ restartCommand(&opts, dockerCli, backendOptions),
+ stopCommand(&opts, dockerCli, backendOptions),
+ psCommand(&opts, dockerCli, backendOptions),
+ listCommand(dockerCli, backendOptions),
+ logsCommand(&opts, dockerCli, backendOptions),
+ configCommand(&opts, dockerCli),
+ killCommand(&opts, dockerCli, backendOptions),
+ runCommand(&opts, dockerCli, backendOptions),
+ removeCommand(&opts, dockerCli, backendOptions),
+ execCommand(&opts, dockerCli, backendOptions),
+ attachCommand(&opts, dockerCli, backendOptions),
+ exportCommand(&opts, dockerCli, backendOptions),
+ commitCommand(&opts, dockerCli, backendOptions),
+ pauseCommand(&opts, dockerCli, backendOptions),
+ unpauseCommand(&opts, dockerCli, backendOptions),
+ topCommand(&opts, dockerCli, backendOptions),
+ eventsCommand(&opts, dockerCli, backendOptions),
+ portCommand(&opts, dockerCli, backendOptions),
+ imagesCommand(&opts, dockerCli, backendOptions),
+ versionCommand(dockerCli),
+ buildCommand(&opts, dockerCli, backendOptions),
+ pushCommand(&opts, dockerCli, backendOptions),
+ pullCommand(&opts, dockerCli, backendOptions),
+ createCommand(&opts, dockerCli, backendOptions),
+ copyCommand(&opts, dockerCli, backendOptions),
+ waitCommand(&opts, dockerCli, backendOptions),
+ scaleCommand(&opts, dockerCli, backendOptions),
+ statsCommand(&opts, dockerCli),
+ watchCommand(&opts, dockerCli, backendOptions),
+ publishCommand(&opts, dockerCli, backendOptions),
+ alphaCommand(&opts, dockerCli, backendOptions),
+ bridgeCommand(&opts, dockerCli),
+ volumesCommand(&opts, dockerCli, backendOptions),
+ )
+
+ c.Flags().SetInterspersed(false)
+ opts.addProjectFlags(c.Flags())
+ c.RegisterFlagCompletionFunc( //nolint:errcheck
+ "project-name",
+ completeProjectNames(dockerCli, backendOptions),
+ )
+ c.RegisterFlagCompletionFunc( //nolint:errcheck
+ "project-directory",
+ func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{}, cobra.ShellCompDirectiveFilterDirs
+ },
+ )
+ c.RegisterFlagCompletionFunc( //nolint:errcheck
+ "file",
+ func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
+ },
+ )
+ c.RegisterFlagCompletionFunc( //nolint:errcheck
+ "profile",
+ completeProfileNames(dockerCli, &opts),
+ )
+ c.RegisterFlagCompletionFunc( //nolint:errcheck
+ "progress",
+ cobra.FixedCompletions(printerModes, cobra.ShellCompDirectiveNoFileComp),
+ )
+
+ c.Flags().StringVar(&ansi, "ansi", "auto", `Control when to print ANSI control characters ("never"|"always"|"auto")`)
+	c.Flags().IntVar(&parallel, "parallel", -1, `Control max parallelism, -1 for unlimited`)
+ c.Flags().BoolVarP(&version, "version", "v", false, "Show the Docker Compose version information")
+ c.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "Execute command in dry run mode")
+ c.Flags().MarkHidden("version") //nolint:errcheck
+ c.Flags().BoolVar(&noAnsi, "no-ansi", false, `Do not print ANSI control characters (DEPRECATED)`)
+ c.Flags().MarkHidden("no-ansi") //nolint:errcheck
+ c.Flags().BoolVar(&verbose, "verbose", false, "Show more output")
+ c.Flags().MarkHidden("verbose") //nolint:errcheck
+ return c
+}
+
+func stdinfo(dockerCli command.Cli) io.Writer {
+ if stdioToStdout {
+ return dockerCli.Out()
+ }
+ return dockerCli.Err()
+}
+
+func setEnvWithDotEnv(opts ProjectOptions) error {
+ options, err := cli.NewProjectOptions(opts.ConfigPaths,
+ cli.WithWorkingDirectory(opts.ProjectDir),
+ cli.WithOsEnv,
+ cli.WithEnvFiles(opts.EnvFiles...),
+ cli.WithDotEnv,
)
- command.Flags().SetInterspersed(false)
- opts.addProjectFlags(command.Flags())
- command.Flags().StringVar(&ansi, "ansi", "auto", `Control when to print ANSI control characters ("never"|"always"|"auto")`)
- command.Flags().BoolVarP(&version, "version", "v", false, "Show the Docker Compose version information")
- command.Flags().MarkHidden("version") //nolint:errcheck
- command.Flags().BoolVar(&noAnsi, "no-ansi", false, `Do not print ANSI control characters (DEPRECATED)`)
- command.Flags().MarkHidden("no-ansi") //nolint:errcheck
- command.Flags().BoolVar(&verbose, "verbose", false, "Show more output")
- command.Flags().MarkHidden("verbose") //nolint:errcheck
- return command
+ if err != nil {
+ return nil
+ }
+ envFromFile, err := dotenv.GetEnvFromFile(composegoutils.GetAsEqualsMap(os.Environ()), options.EnvFiles)
+ if err != nil {
+ return nil
+ }
+ for k, v := range envFromFile {
+ if _, ok := os.LookupEnv(k); !ok && strings.HasPrefix(k, "COMPOSE_") {
+ if err = os.Setenv(k, v); err != nil {
+ return nil
+ }
+ }
+ }
+ return err
+}
+
+var printerModes = []string{
+ display.ModeAuto,
+ display.ModeTTY,
+ display.ModePlain,
+ display.ModeJSON,
+ display.ModeQuiet,
}
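
Note on the root-command changes above: the `--env-file` flag now takes its default from the `COMPOSE_ENV_FILES` environment variable via `defaultStringArrayVar`, which splits a comma-separated value into a string slice. Below is a minimal, self-contained sketch of that splitting behavior; the sample value is illustrative only and not part of the patch.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// splitCommaList mirrors defaultStringArrayVar above: it splits a
// comma-separated environment variable into a slice, skipping empty items.
func splitCommaList(env string) []string {
	return strings.FieldsFunc(os.Getenv(env), func(c rune) bool {
		return c == ','
	})
}

func main() {
	// Illustrative value; the CLI reads COMPOSE_ENV_FILES the same way.
	_ = os.Setenv("COMPOSE_ENV_FILES", ".env,.env.override")
	fmt.Println(splitCommaList("COMPOSE_ENV_FILES")) // [.env .env.override]
}
```
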
diff --git a/cmd/compose/compose_test.go b/cmd/compose/compose_test.go
index b19404424cc..708929ff8cd 100644
--- a/cmd/compose/compose_test.go
+++ b/cmd/compose/compose_test.go
@@ -19,30 +19,32 @@ package compose
import (
"testing"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"gotest.tools/v3/assert"
)
func TestFilterServices(t *testing.T) {
p := &types.Project{
- Services: []types.ServiceConfig{
- {
+ Services: types.Services{
+ "foo": {
Name: "foo",
Links: []string{"bar"},
},
- {
- Name: "bar",
- NetworkMode: types.NetworkModeServicePrefix + "zot",
+ "bar": {
+ Name: "bar",
+ DependsOn: map[string]types.ServiceDependency{
+ "zot": {},
+ },
},
- {
+ "zot": {
Name: "zot",
},
- {
+ "qix": {
Name: "qix",
},
},
}
- err := p.ForServices([]string{"bar"})
+ p, err := p.WithSelectedServices([]string{"bar"})
assert.NilError(t, err)
assert.Equal(t, len(p.Services), 2)
diff --git a/cmd/compose/config.go b/cmd/compose/config.go
new file mode 100644
index 00000000000..14d4d47b96f
--- /dev/null
+++ b/cmd/compose/config.go
@@ -0,0 +1,567 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/template"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/spf13/cobra"
+ "gopkg.in/yaml.v3"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+)
+
+type configOptions struct {
+ *ProjectOptions
+ Format string
+ Output string
+ quiet bool
+ resolveImageDigests bool
+ noInterpolate bool
+ noNormalize bool
+ noResolvePath bool
+ noResolveEnv bool
+ services bool
+ volumes bool
+ networks bool
+ models bool
+ profiles bool
+ images bool
+ hash string
+ noConsistency bool
+ variables bool
+ environment bool
+ lockImageDigests bool
+}
+
+func (o *configOptions) ToProject(ctx context.Context, dockerCli command.Cli, backend api.Compose, services []string) (*types.Project, error) {
+ project, _, err := o.ProjectOptions.ToProject(ctx, dockerCli, backend, services, o.toProjectOptionsFns()...)
+ return project, err
+}
+
+func (o *configOptions) ToModel(ctx context.Context, dockerCli command.Cli, services []string, po ...cli.ProjectOptionsFn) (map[string]any, error) {
+ po = append(po, o.toProjectOptionsFns()...)
+ return o.ProjectOptions.ToModel(ctx, dockerCli, services, po...)
+}
+
+// toProjectOptionsFns converts config options to cli.ProjectOptionsFn
+func (o *configOptions) toProjectOptionsFns() []cli.ProjectOptionsFn {
+ fns := []cli.ProjectOptionsFn{
+ cli.WithInterpolation(!o.noInterpolate),
+ cli.WithResolvedPaths(!o.noResolvePath),
+ cli.WithNormalization(!o.noNormalize),
+ cli.WithConsistency(!o.noConsistency),
+ cli.WithDefaultProfiles(o.Profiles...),
+ cli.WithDiscardEnvFile,
+ }
+ if o.noResolveEnv {
+ fns = append(fns, cli.WithoutEnvironmentResolution)
+ }
+ return fns
+}
+
+func configCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
+ opts := configOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "config [OPTIONS] [SERVICE...]",
+ Short: "Parse, resolve and render compose file in canonical format",
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ if opts.quiet {
+ devnull, err := os.Open(os.DevNull)
+ if err != nil {
+ return err
+ }
+ os.Stdout = devnull
+ }
+ if p.Compatibility {
+ opts.noNormalize = true
+ }
+ if opts.lockImageDigests {
+ opts.resolveImageDigests = true
+ }
+ return nil
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ if opts.services {
+ return runServices(ctx, dockerCli, opts)
+ }
+ if opts.volumes {
+ return runVolumes(ctx, dockerCli, opts)
+ }
+ if opts.networks {
+ return runNetworks(ctx, dockerCli, opts)
+ }
+ if opts.models {
+ return runModels(ctx, dockerCli, opts)
+ }
+ if opts.hash != "" {
+ return runHash(ctx, dockerCli, opts)
+ }
+ if opts.profiles {
+ return runProfiles(ctx, dockerCli, opts, args)
+ }
+ if opts.images {
+ return runConfigImages(ctx, dockerCli, opts, args)
+ }
+ if opts.variables {
+ return runVariables(ctx, dockerCli, opts, args)
+ }
+ if opts.environment {
+ return runEnvironment(ctx, dockerCli, opts, args)
+ }
+
+ if opts.Format == "" {
+ opts.Format = "yaml"
+ }
+ return runConfig(ctx, dockerCli, opts, args)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+ flags := cmd.Flags()
+ flags.StringVar(&opts.Format, "format", "", "Format the output. Values: [yaml | json]")
+ flags.BoolVar(&opts.resolveImageDigests, "resolve-image-digests", false, "Pin image tags to digests")
+ flags.BoolVar(&opts.lockImageDigests, "lock-image-digests", false, "Produces an override file with image digests")
+ flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only validate the configuration, don't print anything")
+ flags.BoolVar(&opts.noInterpolate, "no-interpolate", false, "Don't interpolate environment variables")
+ flags.BoolVar(&opts.noNormalize, "no-normalize", false, "Don't normalize compose model")
+ flags.BoolVar(&opts.noResolvePath, "no-path-resolution", false, "Don't resolve file paths")
+ flags.BoolVar(&opts.noConsistency, "no-consistency", false, "Don't check model consistency - warning: may produce invalid Compose output")
+ flags.BoolVar(&opts.noResolveEnv, "no-env-resolution", false, "Don't resolve service env files")
+
+ flags.BoolVar(&opts.services, "services", false, "Print the service names, one per line.")
+ flags.BoolVar(&opts.volumes, "volumes", false, "Print the volume names, one per line.")
+ flags.BoolVar(&opts.networks, "networks", false, "Print the network names, one per line.")
+ flags.BoolVar(&opts.models, "models", false, "Print the model names, one per line.")
+ flags.BoolVar(&opts.profiles, "profiles", false, "Print the profile names, one per line.")
+ flags.BoolVar(&opts.images, "images", false, "Print the image names, one per line.")
+ flags.StringVar(&opts.hash, "hash", "", "Print the service config hash, one per line.")
+ flags.BoolVar(&opts.variables, "variables", false, "Print model variables and default values.")
+ flags.BoolVar(&opts.environment, "environment", false, "Print environment used for interpolation.")
+ flags.StringVarP(&opts.Output, "output", "o", "", "Save to file (default to stdout)")
+
+ return cmd
+}
+
+func runConfig(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) (err error) {
+ var content []byte
+ if opts.noInterpolate {
+ content, err = runConfigNoInterpolate(ctx, dockerCli, opts, services)
+ if err != nil {
+ return err
+ }
+ } else {
+ content, err = runConfigInterpolate(ctx, dockerCli, opts, services)
+ if err != nil {
+ return err
+ }
+ }
+
+ if !opts.noInterpolate {
+ content = escapeDollarSign(content)
+ }
+
+ if opts.quiet {
+ return nil
+ }
+
+ if opts.Output != "" && len(content) > 0 {
+ return os.WriteFile(opts.Output, content, 0o666)
+ }
+ _, err = fmt.Fprint(dockerCli.Out(), string(content))
+ return err
+}
+
+func runConfigInterpolate(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) ([]byte, error) {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return nil, err
+ }
+
+ project, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.resolveImageDigests {
+ project, err = project.WithImagesResolved(compose.ImageDigestResolver(ctx, dockerCli.ConfigFile(), dockerCli.Client()))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if !opts.noResolveEnv {
+ project, err = project.WithServicesEnvironmentResolved(true)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if !opts.noConsistency {
+ err := project.CheckContainerNameUnicity()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if opts.lockImageDigests {
+ project = imagesOnly(project)
+ }
+
+ var content []byte
+ switch opts.Format {
+ case "json":
+ content, err = project.MarshalJSON()
+ case "yaml":
+ content, err = project.MarshalYAML()
+ default:
+ return nil, fmt.Errorf("unsupported format %q", opts.Format)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return content, nil
+}
+
+// imagesOnly returns a project stripped down to the services' image attributes
+func imagesOnly(project *types.Project) *types.Project {
+ digests := types.Services{}
+ for name, config := range project.Services {
+ digests[name] = types.ServiceConfig{
+ Image: config.Image,
+ }
+ }
+ project = &types.Project{Services: digests}
+ return project
+}
+
+func runConfigNoInterpolate(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) ([]byte, error) {
+ // we can't use ToProject, so the model we render here is only partially resolved
+ model, err := opts.ToModel(ctx, dockerCli, services)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.resolveImageDigests {
+ err = resolveImageDigests(ctx, dockerCli, model)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if opts.lockImageDigests {
+ for key, e := range model {
+ if key != "services" {
+ delete(model, key)
+ } else {
+ for _, s := range e.(map[string]any) {
+ service := s.(map[string]any)
+ for key := range service {
+ if key != "image" {
+ delete(service, key)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return formatModel(model, opts.Format)
+}
+
+func resolveImageDigests(ctx context.Context, dockerCli command.Cli, model map[string]any) (err error) {
+ // create a pseudo-project so we can rely on WithImagesResolved to resolve images
+ p := &types.Project{
+ Services: types.Services{},
+ }
+ services := model["services"].(map[string]any)
+ for name, s := range services {
+ service := s.(map[string]any)
+ if image, ok := service["image"]; ok {
+ p.Services[name] = types.ServiceConfig{
+ Image: image.(string),
+ }
+ }
+ }
+
+ p, err = p.WithImagesResolved(compose.ImageDigestResolver(ctx, dockerCli.ConfigFile(), dockerCli.Client()))
+ if err != nil {
+ return err
+ }
+
+	// Collect images resolved to a digest and update the model accordingly
+ for name, s := range services {
+ service := s.(map[string]any)
+ config := p.Services[name]
+ if config.Image != "" {
+ service["image"] = config.Image
+ }
+ services[name] = service
+ }
+ model["services"] = services
+ return nil
+}
+
+func formatModel(model map[string]any, format string) (content []byte, err error) {
+ switch format {
+ case "json":
+ return json.MarshalIndent(model, "", " ")
+ case "yaml":
+ buf := bytes.NewBuffer([]byte{})
+ encoder := yaml.NewEncoder(buf)
+ encoder.SetIndent(2)
+ err = encoder.Encode(model)
+ return buf.Bytes(), err
+ default:
+ return nil, fmt.Errorf("unsupported format %q", format)
+ }
+}
+
+func runServices(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
+ if opts.noInterpolate {
+ // we can't use ToProject, so the model we render here is only partially resolved
+ data, err := opts.ToModel(ctx, dockerCli, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ if _, ok := data["services"]; ok {
+ for serviceName := range data["services"].(map[string]any) {
+ _, _ = fmt.Fprintln(dockerCli.Out(), serviceName)
+ }
+ }
+
+ return nil
+ }
+
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ProjectOptions.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+ err = project.ForEachService(project.ServiceNames(), func(serviceName string, _ *types.ServiceConfig) error {
+ _, _ = fmt.Fprintln(dockerCli.Out(), serviceName)
+ return nil
+ })
+
+ return err
+}
+
+func runVolumes(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ProjectOptions.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+ for n := range project.Volumes {
+ _, _ = fmt.Fprintln(dockerCli.Out(), n)
+ }
+ return nil
+}
+
+func runNetworks(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ProjectOptions.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+ for n := range project.Networks {
+ _, _ = fmt.Fprintln(dockerCli.Out(), n)
+ }
+ return nil
+}
+
+func runModels(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ProjectOptions.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+ for _, model := range project.Models {
+ if model.Model != "" {
+ _, _ = fmt.Fprintln(dockerCli.Out(), model.Model)
+ }
+ }
+ return nil
+}
+
+func runHash(ctx context.Context, dockerCli command.Cli, opts configOptions) error {
+ var services []string
+ if opts.hash != "*" {
+ services = append(services, strings.Split(opts.hash, ",")...)
+ }
+
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ProjectOptions.ToProject(ctx, dockerCli, backend, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ if err := applyPlatforms(project, true); err != nil {
+ return err
+ }
+
+ if len(services) == 0 {
+ services = project.ServiceNames()
+ }
+
+ sorted := services
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i] < sorted[j]
+ })
+
+ for _, name := range sorted {
+ s, err := project.GetService(name)
+ if err != nil {
+ return err
+ }
+
+ hash, err := compose.ServiceHash(s)
+ if err != nil {
+ return err
+ }
+ _, _ = fmt.Fprintf(dockerCli.Out(), "%s %s\n", name, hash)
+ }
+ return nil
+}
+
+func runProfiles(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) error {
+ set := map[string]struct{}{}
+
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+ for _, s := range project.AllServices() {
+ for _, p := range s.Profiles {
+ set[p] = struct{}{}
+ }
+ }
+ profiles := make([]string, 0, len(set))
+ for p := range set {
+ profiles = append(profiles, p)
+ }
+ sort.Strings(profiles)
+ for _, p := range profiles {
+ _, _ = fmt.Fprintln(dockerCli.Out(), p)
+ }
+ return nil
+}
+
+func runConfigImages(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+
+ for _, s := range project.Services {
+ _, _ = fmt.Fprintln(dockerCli.Out(), api.GetImageNameOrDefault(s, project.Name))
+ }
+ return nil
+}
+
+func runVariables(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) error {
+ opts.noInterpolate = true
+ model, err := opts.ToModel(ctx, dockerCli, services, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ variables := template.ExtractVariables(model, template.DefaultPattern)
+
+ if opts.Format == "yaml" {
+ result, err := yaml.Marshal(variables)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(result))
+ return nil
+ }
+
+ return formatter.Print(variables, opts.Format, dockerCli.Out(), func(w io.Writer) {
+ for name, variable := range variables {
+ _, _ = fmt.Fprintf(w, "%s\t%t\t%s\t%s\n", name, variable.Required, variable.DefaultValue, variable.PresenceValue)
+ }
+ }, "NAME", "REQUIRED", "DEFAULT VALUE", "ALTERNATE VALUE")
+}
+
+func runEnvironment(ctx context.Context, dockerCli command.Cli, opts configOptions, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli)
+ if err != nil {
+ return err
+ }
+
+ project, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range project.Environment.Values() {
+ fmt.Println(v)
+ }
+ return nil
+}
+
+func escapeDollarSign(marshal []byte) []byte {
+ dollar := []byte{'$'}
+ escDollar := []byte{'$', '$'}
+ return bytes.ReplaceAll(marshal, dollar, escDollar)
+}
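
For the interpolated `config` output above, `escapeDollarSign` doubles every `$` so the rendered YAML can be read back by Compose without being interpolated a second time. A small self-contained sketch of that transformation; the sample YAML line is made up for illustration.

```go
package main

import (
	"bytes"
	"fmt"
)

// escape mirrors escapeDollarSign above: every "$" becomes "$$" so literal
// dollar signs survive a later interpolation pass.
func escape(rendered []byte) []byte {
	return bytes.ReplaceAll(rendered, []byte{'$'}, []byte{'$', '$'})
}

func main() {
	sample := []byte("command: echo $HOME\n") // made-up rendered config line
	fmt.Print(string(escape(sample)))         // command: echo $$HOME
}
```
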
diff --git a/cmd/compose/convert.go b/cmd/compose/convert.go
deleted file mode 100644
index 0780eabac2b..00000000000
--- a/cmd/compose/convert.go
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package compose
-
-import (
- "bufio"
- "context"
- "fmt"
- "io"
- "os"
- "sort"
- "strings"
-
- "github.com/cnabio/cnab-to-oci/remotes"
- "github.com/compose-spec/compose-go/cli"
- "github.com/compose-spec/compose-go/types"
- "github.com/distribution/distribution/v3/reference"
- cliconfig "github.com/docker/cli/cli/config"
- "github.com/opencontainers/go-digest"
- "github.com/spf13/cobra"
-
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/compose"
-)
-
-type convertOptions struct {
- *projectOptions
- Format string
- Output string
- quiet bool
- resolveImageDigests bool
- noInterpolate bool
- noNormalize bool
- services bool
- volumes bool
- profiles bool
- images bool
- hash string
-}
-
-func convertCommand(p *projectOptions, backend api.Service) *cobra.Command {
- opts := convertOptions{
- projectOptions: p,
- }
- cmd := &cobra.Command{
- Aliases: []string{"config"},
- Use: "convert SERVICES",
- Short: "Converts the compose file to platform's canonical format",
- PreRunE: Adapt(func(ctx context.Context, args []string) error {
- if opts.quiet {
- devnull, err := os.Open(os.DevNull)
- if err != nil {
- return err
- }
- os.Stdout = devnull
- }
- if p.Compatibility {
- opts.noNormalize = true
- }
- return nil
- }),
- RunE: Adapt(func(ctx context.Context, args []string) error {
- if opts.services {
- return runServices(opts)
- }
- if opts.volumes {
- return runVolumes(opts)
- }
- if opts.hash != "" {
- return runHash(opts)
- }
- if opts.profiles {
- return runProfiles(opts, args)
- }
- if opts.images {
- return runConfigImages(opts, args)
- }
-
- return runConvert(ctx, backend, opts, args)
- }),
- ValidArgsFunction: serviceCompletion(p),
- }
- flags := cmd.Flags()
- flags.StringVar(&opts.Format, "format", "yaml", "Format the output. Values: [yaml | json]")
- flags.BoolVar(&opts.resolveImageDigests, "resolve-image-digests", false, "Pin image tags to digests.")
- flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only validate the configuration, don't print anything.")
- flags.BoolVar(&opts.noInterpolate, "no-interpolate", false, "Don't interpolate environment variables.")
- flags.BoolVar(&opts.noNormalize, "no-normalize", false, "Don't normalize compose model.")
-
- flags.BoolVar(&opts.services, "services", false, "Print the service names, one per line.")
- flags.BoolVar(&opts.volumes, "volumes", false, "Print the volume names, one per line.")
- flags.BoolVar(&opts.profiles, "profiles", false, "Print the profile names, one per line.")
- flags.BoolVar(&opts.images, "images", false, "Print the image names, one per line.")
- flags.StringVar(&opts.hash, "hash", "", "Print the service config hash, one per line.")
- flags.StringVarP(&opts.Output, "output", "o", "", "Save to file (default to stdout)")
-
- return cmd
-}
-
-func runConvert(ctx context.Context, backend api.Service, opts convertOptions, services []string) error {
- var json []byte
- project, err := opts.toProject(services,
- cli.WithInterpolation(!opts.noInterpolate),
- cli.WithResolvedPaths(true),
- cli.WithNormalization(!opts.noNormalize),
- cli.WithDiscardEnvFile)
-
- if err != nil {
- return err
- }
-
- if opts.resolveImageDigests {
- configFile := cliconfig.LoadDefaultConfigFile(os.Stderr)
-
- resolver := remotes.CreateResolver(configFile)
- err = project.ResolveImages(func(named reference.Named) (digest.Digest, error) {
- _, desc, err := resolver.Resolve(ctx, named.String())
- return desc.Digest, err
- })
- if err != nil {
- return err
- }
- }
-
- json, err = backend.Convert(ctx, project, api.ConvertOptions{
- Format: opts.Format,
- Output: opts.Output,
- })
- if err != nil {
- return err
- }
-
- if opts.quiet {
- return nil
- }
-
- var out io.Writer = os.Stdout
- if opts.Output != "" && len(json) > 0 {
- file, err := os.Create(opts.Output)
- if err != nil {
- return err
- }
- out = bufio.NewWriter(file)
- }
- _, err = fmt.Fprint(out, string(json))
- return err
-}
-
-func runServices(opts convertOptions) error {
- project, err := opts.toProject(nil)
- if err != nil {
- return err
- }
- return project.WithServices(project.ServiceNames(), func(s types.ServiceConfig) error {
- fmt.Println(s.Name)
- return nil
- })
-}
-
-func runVolumes(opts convertOptions) error {
- project, err := opts.toProject(nil)
- if err != nil {
- return err
- }
- for n := range project.Volumes {
- fmt.Println(n)
- }
- return nil
-}
-
-func runHash(opts convertOptions) error {
- var services []string
- if opts.hash != "*" {
- services = append(services, strings.Split(opts.hash, ",")...)
- }
- project, err := opts.toProject(services)
- if err != nil {
- return err
- }
- for _, s := range project.Services {
- hash, err := compose.ServiceHash(s)
- if err != nil {
- return err
- }
- fmt.Printf("%s %s\n", s.Name, hash)
- }
- return nil
-}
-
-func runProfiles(opts convertOptions, services []string) error {
- set := map[string]struct{}{}
- project, err := opts.toProject(services)
- if err != nil {
- return err
- }
- for _, s := range project.AllServices() {
- for _, p := range s.Profiles {
- set[p] = struct{}{}
- }
- }
- profiles := make([]string, 0, len(set))
- for p := range set {
- profiles = append(profiles, p)
- }
- sort.Strings(profiles)
- for _, p := range profiles {
- fmt.Println(p)
- }
- return nil
-}
-
-func runConfigImages(opts convertOptions, services []string) error {
- project, err := opts.toProject(services)
- if err != nil {
- return err
- }
- for _, s := range project.Services {
- if s.Image != "" {
- fmt.Println(s.Image)
- } else {
- fmt.Printf("%s_%s\n", project.Name, s.Name)
- }
- }
- return nil
-}
diff --git a/cmd/compose/cp.go b/cmd/compose/cp.go
index e38b9de42b5..05e61236643 100644
--- a/cmd/compose/cp.go
+++ b/cmd/compose/cp.go
@@ -21,13 +21,15 @@ import (
"errors"
"github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type copyOptions struct {
- *projectOptions
+ *ProjectOptions
source string
destination string
@@ -37,9 +39,9 @@ type copyOptions struct {
copyUIDGID bool
}
-func copyCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func copyCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := copyOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
copyCmd := &cobra.Command{
Use: `cp [OPTIONS] SERVICE:SRC_PATH DEST_PATH|-
@@ -55,29 +57,33 @@ func copyCommand(p *projectOptions, backend api.Service) *cobra.Command {
}
return nil
}),
- RunE: Adapt(func(ctx context.Context, args []string) error {
+ RunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
opts.source = args[0]
opts.destination = args[1]
- return runCopy(ctx, backend, opts)
+ return runCopy(ctx, dockerCli, backendOptions, opts)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := copyCmd.Flags()
- flags.IntVar(&opts.index, "index", 1, "Index of the container if there are multiple instances of a service [default: 1].")
- flags.BoolVar(&opts.all, "all", false, "Copy to all the containers of the service.")
+ flags.IntVar(&opts.index, "index", 0, "Index of the container if service has multiple replicas")
+ flags.BoolVar(&opts.all, "all", false, "Include containers created by the run command")
flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH")
flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)")
return copyCmd
}
-func runCopy(ctx context.Context, backend api.Service, opts copyOptions) error {
- name, err := opts.toProjectName()
+func runCopy(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts copyOptions) error {
+ name, err := opts.toProjectName(ctx, dockerCli)
if err != nil {
return err
}
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
return backend.Copy(ctx, name, api.CopyOptions{
Source: opts.source,
Destination: opts.destination,
diff --git a/cmd/compose/create.go b/cmd/compose/create.go
index 7ce8a1853ad..481fe3277aa 100644
--- a/cmd/compose/create.go
+++ b/cmd/compose/create.go
@@ -19,17 +19,26 @@ package compose
import (
"context"
"fmt"
+ "slices"
+ "strconv"
+ "strings"
"time"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type createOptions struct {
Build bool
noBuild bool
+ Pull string
+ pullChanged bool
removeOrphans bool
ignoreOrphans bool
forceRecreate bool
@@ -39,14 +48,20 @@ type createOptions struct {
timeChanged bool
timeout int
quietPull bool
+ scale []string
+ AssumeYes bool
}
-func createCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func createCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := createOptions{}
+ buildOpts := buildOptions{
+ ProjectOptions: p,
+ }
cmd := &cobra.Command{
- Use: "create [SERVICE...]",
- Short: "Creates containers for a service.",
- PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ Use: "create [OPTIONS] [SERVICE...]",
+ Short: "Creates containers for a service",
+ PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
+ opts.pullChanged = cmd.Flags().Changed("pull")
if opts.Build && opts.noBuild {
return fmt.Errorf("--build and --no-build are incompatible")
}
@@ -55,27 +70,67 @@ func createCommand(p *projectOptions, backend api.Service) *cobra.Command {
}
return nil
}),
- RunE: p.WithProject(func(ctx context.Context, project *types.Project) error {
- return backend.Create(ctx, project, api.CreateOptions{
- RemoveOrphans: opts.removeOrphans,
- IgnoreOrphans: opts.ignoreOrphans,
- Recreate: opts.recreateStrategy(),
- RecreateDependencies: opts.dependenciesRecreateStrategy(),
- Inherit: !opts.noInherit,
- Timeout: opts.GetTimeout(),
- QuietPull: false,
- })
+ RunE: p.WithServices(dockerCli, func(ctx context.Context, project *types.Project, services []string) error {
+ return runCreate(ctx, dockerCli, backendOptions, opts, buildOpts, project, services)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := cmd.Flags()
- flags.BoolVar(&opts.Build, "build", false, "Build images before starting containers.")
- flags.BoolVar(&opts.noBuild, "no-build", false, "Don't build an image, even if it's missing.")
- flags.BoolVar(&opts.forceRecreate, "force-recreate", false, "Recreate containers even if their configuration and image haven't changed.")
+ flags.BoolVar(&opts.Build, "build", false, "Build images before starting containers")
+ flags.BoolVar(&opts.noBuild, "no-build", false, "Don't build an image, even if it's policy")
+ flags.StringVar(&opts.Pull, "pull", "policy", `Pull image before running ("always"|"missing"|"never"|"build")`)
+ flags.BoolVar(&opts.quietPull, "quiet-pull", false, "Pull without printing progress information")
+ flags.BoolVar(&opts.forceRecreate, "force-recreate", false, "Recreate containers even if their configuration and image haven't changed")
flags.BoolVar(&opts.noRecreate, "no-recreate", false, "If containers already exist, don't recreate them. Incompatible with --force-recreate.")
+ flags.BoolVar(&opts.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file")
+ flags.StringArrayVar(&opts.scale, "scale", []string{}, "Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
+ flags.BoolVarP(&opts.AssumeYes, "yes", "y", false, `Assume "yes" as answer to all prompts and run non-interactively`)
+ flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ // assumeYes was introduced by mistake as `--y`
+ if name == "y" {
+ logrus.Warn("--y is deprecated, please use --yes instead")
+ name = "yes"
+ }
+ return pflag.NormalizedName(name)
+ })
return cmd
}
+func runCreate(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, createOpts createOptions, buildOpts buildOptions, project *types.Project, services []string) error {
+ if err := createOpts.Apply(project); err != nil {
+ return err
+ }
+
+ var build *api.BuildOptions
+ if !createOpts.noBuild {
+ bo, err := buildOpts.toAPIBuildOptions(services)
+ if err != nil {
+ return err
+ }
+ build = &bo
+ }
+
+ if createOpts.AssumeYes {
+ backendOptions.Options = append(backendOptions.Options, compose.WithPrompt(compose.AlwaysOkPrompt()))
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Create(ctx, project, api.CreateOptions{
+ Build: build,
+ Services: services,
+ RemoveOrphans: createOpts.removeOrphans,
+ IgnoreOrphans: createOpts.ignoreOrphans,
+ Recreate: createOpts.recreateStrategy(),
+ RecreateDependencies: createOpts.dependenciesRecreateStrategy(),
+ Inherit: !createOpts.noInherit,
+ Timeout: createOpts.GetTimeout(),
+ QuietPull: createOpts.quietPull,
+ })
+}
+
func (opts createOptions) recreateStrategy() string {
if opts.noRecreate {
return api.RecreateNever
@@ -83,6 +138,9 @@ func (opts createOptions) recreateStrategy() string {
if opts.forceRecreate {
return api.RecreateForce
}
+ if opts.noInherit {
+ return api.RecreateForce
+ }
return api.RecreateDiverged
}
@@ -104,7 +162,19 @@ func (opts createOptions) GetTimeout() *time.Duration {
return nil
}
-func (opts createOptions) Apply(project *types.Project) {
+func (opts createOptions) Apply(project *types.Project) error {
+ if opts.pullChanged {
+ if !opts.isPullPolicyValid() {
+ return fmt.Errorf("invalid --pull option %q", opts.Pull)
+ }
+ for i, service := range project.Services {
+ service.PullPolicy = opts.Pull
+ project.Services[i] = service
+ }
+ }
+ // N.B. opts.Build means "force build all", but images can still be built
+ // when this is false
+	// e.g. if a service has pull_policy: build or its local image is missing
if opts.Build {
for i, service := range project.Services {
if service.Build == nil {
@@ -114,10 +184,41 @@ func (opts createOptions) Apply(project *types.Project) {
project.Services[i] = service
}
}
- if opts.noBuild {
- for i, service := range project.Services {
- service.Build = nil
- project.Services[i] = service
+
+ if err := applyPlatforms(project, true); err != nil {
+ return err
+ }
+
+ err := applyScaleOpts(project, opts.scale)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func applyScaleOpts(project *types.Project, opts []string) error {
+ for _, scale := range opts {
+ split := strings.Split(scale, "=")
+ if len(split) != 2 {
+ return fmt.Errorf("invalid --scale option %q. Should be SERVICE=NUM", scale)
+ }
+ name := split[0]
+ replicas, err := strconv.Atoi(split[1])
+ if err != nil {
+ return err
+ }
+ err = setServiceScale(project, name, replicas)
+ if err != nil {
+ return err
}
}
+ return nil
+}
+
+func (opts createOptions) isPullPolicyValid() bool {
+ pullPolicies := []string{
+ types.PullPolicyAlways, types.PullPolicyNever, types.PullPolicyBuild,
+ types.PullPolicyMissing, types.PullPolicyIfNotPresent,
+ }
+ return slices.Contains(pullPolicies, opts.Pull)
}
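
The `--scale` flag added to `create` above accepts `SERVICE=NUM` entries; `applyScaleOpts` splits each entry on `=` and converts the count with `strconv.Atoi`. A standalone sketch of the same parsing, using hypothetical service names:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseScale mirrors applyScaleOpts above: each entry must be SERVICE=NUM.
func parseScale(opts []string) (map[string]int, error) {
	scales := map[string]int{}
	for _, scale := range opts {
		split := strings.Split(scale, "=")
		if len(split) != 2 {
			return nil, fmt.Errorf("invalid --scale option %q. Should be SERVICE=NUM", scale)
		}
		replicas, err := strconv.Atoi(split[1])
		if err != nil {
			return nil, err
		}
		scales[split[0]] = replicas
	}
	return scales, nil
}

func main() {
	// "web" and "worker" are hypothetical service names.
	scales, err := parseScale([]string{"web=3", "worker=2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(scales) // map[web:3 worker:2]
}
```
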
diff --git a/cmd/compose/down.go b/cmd/compose/down.go
index daca5164f70..8a3df48390d 100644
--- a/cmd/compose/down.go
+++ b/cmd/compose/down.go
@@ -22,17 +22,18 @@ import (
"os"
"time"
- "github.com/compose-spec/compose-go/types"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/docker/compose/v5/pkg/utils"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type downOptions struct {
- *projectOptions
+ *ProjectOptions
removeOrphans bool
timeChanged bool
timeout int
@@ -40,12 +41,12 @@ type downOptions struct {
images string
}
-func downCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func downCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := downOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
downCmd := &cobra.Command{
- Use: "down",
+ Use: "down [OPTIONS] [SERVICES]",
Short: "Stop and remove containers, networks",
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
opts.timeChanged = cmd.Flags().Changed("timeout")
@@ -57,19 +58,18 @@ func downCommand(p *projectOptions, backend api.Service) *cobra.Command {
return nil
}),
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runDown(ctx, backend, opts)
+ return runDown(ctx, dockerCli, backendOptions, opts, args)
}),
ValidArgsFunction: noCompletion(),
}
flags := downCmd.Flags()
- removeOrphans := utils.StringToBool(os.Getenv("COMPOSE_REMOVE_ORPHANS "))
- flags.BoolVar(&opts.removeOrphans, "remove-orphans", removeOrphans, "Remove containers for services not defined in the Compose file.")
- flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
- flags.BoolVarP(&opts.volumes, "volumes", "v", false, " Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers.")
+ removeOrphans := utils.StringToBool(os.Getenv(ComposeRemoveOrphans))
+ flags.BoolVar(&opts.removeOrphans, "remove-orphans", removeOrphans, "Remove containers for services not defined in the Compose file")
+ flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
+ flags.BoolVarP(&opts.volumes, "volumes", "v", false, `Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers`)
flags.StringVar(&opts.images, "rmi", "", `Remove images used by services. "local" remove only images that don't have a custom tag ("local"|"all")`)
flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
- switch name {
- case "volume":
+ if name == "volume" {
name = "volumes"
logrus.Warn("--volume is deprecated, please use --volumes")
}
@@ -78,16 +78,10 @@ func downCommand(p *projectOptions, backend api.Service) *cobra.Command {
return downCmd
}
-func runDown(ctx context.Context, backend api.Service, opts downOptions) error {
- name := opts.ProjectName
- var project *types.Project
- if opts.ProjectName == "" {
- p, err := opts.toProject(nil)
- if err != nil {
- return err
- }
- project = p
- name = p.Name
+func runDown(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts downOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
+ if err != nil {
+ return err
}
var timeout *time.Duration
@@ -95,11 +89,16 @@ func runDown(ctx context.Context, backend api.Service, opts downOptions) error {
timeoutValue := time.Duration(opts.timeout) * time.Second
timeout = &timeoutValue
}
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
return backend.Down(ctx, name, api.DownOptions{
RemoveOrphans: opts.removeOrphans,
Project: project,
Timeout: timeout,
Images: opts.images,
Volumes: opts.volumes,
+ Services: services,
})
}
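
In the reworked `down` command above, the `--timeout` default changed from 10 to 0 and a `*time.Duration` is only forwarded to the backend when the flag was explicitly set (tracked by `timeChanged` in the PreRunE). A minimal sketch of that conversion, with assumed inputs:

```go
package main

import (
	"fmt"
	"time"
)

// shutdownTimeout mirrors runDown above: a *time.Duration is only produced
// when the --timeout flag was actually changed on the command line;
// nil means no explicit timeout is forwarded to the backend.
func shutdownTimeout(timeChanged bool, seconds int) *time.Duration {
	if !timeChanged {
		return nil
	}
	d := time.Duration(seconds) * time.Second
	return &d
}

func main() {
	fmt.Println(shutdownTimeout(false, 0))  // <nil>
	fmt.Println(*shutdownTimeout(true, 30)) // 30s
}
```
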
diff --git a/cmd/compose/events.go b/cmd/compose/events.go
index 2f9e711a0e4..fb333e62e78 100644
--- a/cmd/compose/events.go
+++ b/cmd/compose/events.go
@@ -21,43 +21,55 @@ import (
"encoding/json"
"fmt"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
)
type eventsOpts struct {
*composeOptions
- json bool
+ json bool
+ since string
+ until string
}
-func eventsCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func eventsCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := eventsOpts{
composeOptions: &composeOptions{
- projectOptions: p,
+ ProjectOptions: p,
},
}
cmd := &cobra.Command{
- Use: "events [options] [--] [SERVICE...]",
- Short: "Receive real time events from containers.",
+ Use: "events [OPTIONS] [SERVICE...]",
+ Short: "Receive real time events from containers",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runEvents(ctx, backend, opts, args)
+ return runEvents(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
cmd.Flags().BoolVar(&opts.json, "json", false, "Output events as a stream of json objects")
+ cmd.Flags().StringVar(&opts.since, "since", "", "Show all events created since timestamp")
+ cmd.Flags().StringVar(&opts.until, "until", "", "Stream events until this timestamp")
return cmd
}
-func runEvents(ctx context.Context, backend api.Service, opts eventsOpts, services []string) error {
- project, err := opts.toProjectName()
+func runEvents(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts eventsOpts, services []string) error {
+ name, err := opts.toProjectName(ctx, dockerCli)
if err != nil {
return err
}
- return backend.Events(ctx, project, api.EventsOptions{
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Events(ctx, name, api.EventsOptions{
Services: services,
+ Since: opts.since,
+ Until: opts.until,
Consumer: func(event api.Event) error {
if opts.json {
marshal, err := json.Marshal(map[string]interface{}{
@@ -71,9 +83,9 @@ func runEvents(ctx context.Context, backend api.Service, opts eventsOpts, servic
if err != nil {
return err
}
- fmt.Println(string(marshal))
+ _, _ = fmt.Fprintln(dockerCli.Out(), string(marshal))
} else {
- fmt.Println(event)
+ _, _ = fmt.Fprintln(dockerCli.Out(), event)
}
return nil
},
diff --git a/cmd/compose/exec.go b/cmd/compose/exec.go
index 00faae6c52a..761a7b6da1b 100644
--- a/cmd/compose/exec.go
+++ b/cmd/compose/exec.go
@@ -18,13 +18,18 @@ package compose
import (
"context"
+ "errors"
+ "fmt"
+ "os"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/compose"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
)
type execOpts struct {
@@ -43,15 +48,15 @@ type execOpts struct {
interactive bool
}
-func execCommand(p *projectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
+func execCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := execOpts{
composeOptions: &composeOptions{
- projectOptions: p,
+ ProjectOptions: p,
},
}
runCmd := &cobra.Command{
- Use: "exec [options] [-e KEY=VAL...] [--] SERVICE COMMAND [ARGS...]",
- Short: "Execute a command in a running container.",
+ Use: "exec [OPTIONS] SERVICE COMMAND [ARGS...]",
+ Short: "Execute a command in a running container",
Args: cobra.MinimumNArgs(2),
PreRunE: Adapt(func(ctx context.Context, args []string) error {
opts.service = args[0]
@@ -59,34 +64,48 @@ func execCommand(p *projectOptions, dockerCli command.Cli, backend api.Service)
return nil
}),
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runExec(ctx, backend, opts)
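+ // Exit-status propagation: when runExec returns a cli.StatusError, exit the
+ // process with that code so `docker compose exec` mirrors the exec'd command's status.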
+ err := runExec(ctx, dockerCli, backendOptions, opts)
+ if err != nil {
+ logrus.Debugf("%v", err)
+ var cliError cli.StatusError
+ if ok := errors.As(err, &cliError); ok {
+ os.Exit(cliError.StatusCode)
+ }
+ }
+ return err
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
- runCmd.Flags().BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: Run command in the background.")
+ runCmd.Flags().BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: Run command in the background")
runCmd.Flags().StringArrayVarP(&opts.environment, "env", "e", []string{}, "Set environment variables")
- runCmd.Flags().IntVar(&opts.index, "index", 1, "index of the container if there are multiple instances of a service [default: 1].")
- runCmd.Flags().BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the process.")
- runCmd.Flags().StringVarP(&opts.user, "user", "u", "", "Run the command as this user.")
- runCmd.Flags().BoolVarP(&opts.noTty, "no-TTY", "T", !dockerCli.Out().IsTerminal(), "Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY.")
- runCmd.Flags().StringVarP(&opts.workingDir, "workdir", "w", "", "Path to workdir directory for this command.")
+ runCmd.Flags().IntVar(&opts.index, "index", 0, "Index of the container if service has multiple replicas")
+ runCmd.Flags().BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the process")
+ runCmd.Flags().StringVarP(&opts.user, "user", "u", "", "Run the command as this user")
+ runCmd.Flags().BoolVarP(&opts.noTty, "no-tty", "T", !dockerCli.Out().IsTerminal(), "Disable pseudo-TTY allocation. By default 'docker compose exec' allocates a TTY.")
+ runCmd.Flags().StringVarP(&opts.workingDir, "workdir", "w", "", "Path to workdir directory for this command")
- runCmd.Flags().BoolVarP(&opts.interactive, "interactive", "i", true, "Keep STDIN open even if not attached.")
+ runCmd.Flags().BoolVarP(&opts.interactive, "interactive", "i", true, "Keep STDIN open even if not attached")
runCmd.Flags().MarkHidden("interactive") //nolint:errcheck
- runCmd.Flags().BoolP("tty", "t", true, "Allocate a pseudo-TTY.")
+ runCmd.Flags().BoolP("tty", "t", true, "Allocate a pseudo-TTY")
runCmd.Flags().MarkHidden("tty") //nolint:errcheck
runCmd.Flags().SetInterspersed(false)
+ runCmd.Flags().SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ if name == "no-TTY" { // legacy
+ name = "no-tty"
+ }
+ return pflag.NormalizedName(name)
+ })
return runCmd
}
-func runExec(ctx context.Context, backend api.Service, opts execOpts) error {
- projectName, err := opts.toProjectName()
+func runExec(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts execOpts) error {
+ projectName, err := opts.toProjectName(ctx, dockerCli)
if err != nil {
return err
}
- projectOptions, err := opts.composeOptions.toProjectOptions()
+ projectOptions, err := opts.composeOptions.toProjectOptions() //nolint:staticcheck
if err != nil {
return err
}
@@ -107,10 +126,14 @@ func runExec(ctx context.Context, backend api.Service, opts execOpts) error {
Interactive: opts.interactive,
}
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
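+ // A non-zero exit code from the exec'd process is surfaced below as a cli.StatusError,
+ // so the CLI terminates with the same status instead of reporting a generic failure.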
exitCode, err := backend.Exec(ctx, projectName, execOpts)
if exitCode != 0 {
- errMsg := ""
- if err != nil {
+ errMsg := fmt.Sprintf("exit status %d", exitCode)
+ if err != nil && err.Error() != "" {
errMsg = err.Error()
}
return cli.StatusError{StatusCode: exitCode, Status: errMsg}
diff --git a/cmd/compose/export.go b/cmd/compose/export.go
new file mode 100644
index 00000000000..5cafa7fd993
--- /dev/null
+++ b/cmd/compose/export.go
@@ -0,0 +1,79 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+type exportOptions struct {
+ *ProjectOptions
+
+ service string
+ output string
+ index int
+}
+
+func exportCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ options := exportOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "export [OPTIONS] SERVICE",
+ Short: "Export a service container's filesystem as a tar archive",
+ Args: cobra.MinimumNArgs(1),
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ options.service = args[0]
+ return nil
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runExport(ctx, dockerCli, backendOptions, options)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+
+ flags := cmd.Flags()
+ flags.IntVar(&options.index, "index", 0, "Index of the container if service has multiple replicas")
+ flags.StringVarP(&options.output, "output", "o", "", "Write to a file, instead of STDOUT")
+
+ return cmd
+}
+
+func runExport(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, options exportOptions) error {
+ projectName, err := options.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+
+ exportOptions := api.ExportOptions{
+ Service: options.service,
+ Index: options.index,
+ Output: options.output,
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Export(ctx, projectName, exportOptions)
+}
diff --git a/cmd/compose/generate.go b/cmd/compose/generate.go
new file mode 100644
index 00000000000..b6e27a358c5
--- /dev/null
+++ b/cmd/compose/generate.go
@@ -0,0 +1,90 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type generateOptions struct {
+ *ProjectOptions
+ Format string
+}
+
+func generateCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := generateOptions{
+ ProjectOptions: p,
+ }
+
+ cmd := &cobra.Command{
+ Use: "generate [OPTIONS] [CONTAINERS...]",
+ Short: "EXPERIMENTAL - Generate a Compose file from existing containers",
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ return nil
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runGenerate(ctx, dockerCli, backendOptions, opts, args)
+ }),
+ }
+
+ cmd.Flags().StringVar(&opts.ProjectName, "name", "", "Project name to set in the Compose file")
+ cmd.Flags().StringVar(&opts.ProjectDir, "project-dir", "", "Directory to use for the project")
+ cmd.Flags().StringVar(&opts.Format, "format", "yaml", "Format the output. Values: [yaml | json]")
+ return cmd
+}
+
+func runGenerate(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts generateOptions, containers []string) error {
+ _, _ = fmt.Fprintln(os.Stderr, "generate command is EXPERIMENTAL")
+ if len(containers) == 0 {
+ return fmt.Errorf("at least one container must be specified")
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ project, err := backend.Generate(ctx, api.GenerateOptions{
+ Containers: containers,
+ ProjectName: opts.ProjectName,
+ })
+ if err != nil {
+ return err
+ }
+
+ var content []byte
+ switch opts.Format {
+ case "json":
+ content, err = project.MarshalJSON()
+ case "yaml":
+ content, err = project.MarshalYAML()
+ default:
+ return fmt.Errorf("unsupported format %q", opts.Format)
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(content))
+
+ return nil
+}
diff --git a/cmd/compose/images.go b/cmd/compose/images.go
index eebef82ebb0..ca4be27fe32 100644
--- a/cmd/compose/images.go
+++ b/cmd/compose/images.go
@@ -20,46 +20,55 @@ import (
"context"
"fmt"
"io"
- "os"
- "sort"
+ "maps"
+ "slices"
"strings"
+ "time"
+ "github.com/containerd/platforms"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/cmd/formatter"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/api"
)
type imageOptions struct {
- *projectOptions
- Quiet bool
+ *ProjectOptions
+ Quiet bool
+ Format string
}
-func imagesCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func imagesCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := imageOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
imgCmd := &cobra.Command{
- Use: "images [SERVICE...]",
+ Use: "images [OPTIONS] [SERVICE...]",
Short: "List images used by the created containers",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runImages(ctx, backend, opts, args)
+ return runImages(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
+ imgCmd.Flags().StringVar(&opts.Format, "format", "table", "Format the output. Values: [table | json]")
imgCmd.Flags().BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs")
return imgCmd
}
-func runImages(ctx context.Context, backend api.Service, opts imageOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runImages(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts imageOptions, services []string) error {
+ projectName, err := opts.toProjectName(ctx, dockerCli)
if err != nil {
return err
}
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
images, err := backend.Images(ctx, projectName, api.ImagesOptions{
Services: services,
})
@@ -74,23 +83,54 @@ func runImages(ctx context.Context, backend api.Service, opts imageOptions, serv
if i := strings.IndexRune(img.ID, ':'); i >= 0 {
id = id[i+1:]
}
- if !utils.StringContains(ids, id) {
+ if !slices.Contains(ids, id) {
ids = append(ids, id)
}
}
for _, img := range ids {
- fmt.Println(img)
+ _, _ = fmt.Fprintln(dockerCli.Out(), img)
}
return nil
}
+ if opts.Format == "json" {
- sort.Slice(images, func(i, j int) bool {
- return images[i].ContainerName < images[j].ContainerName
- })
+ type img struct {
+ ID string `json:"ID"`
+ ContainerName string `json:"ContainerName"`
+ Repository string `json:"Repository"`
+ Tag string `json:"Tag"`
+ Platform string `json:"Platform"`
+ Size int64 `json:"Size"`
+ Created *time.Time `json:"Created,omitempty"`
+ LastTagTime time.Time `json:"LastTagTime,omitzero"`
+ }
+ // Convert map to slice
+ var imageList []img
+ for ctr, i := range images {
+ lastTagTime := i.LastTagTime
+ imageList = append(imageList, img{
+ ContainerName: ctr,
+ ID: i.ID,
+ Repository: i.Repository,
+ Tag: i.Tag,
+ Platform: platforms.Format(i.Platform),
+ Size: i.Size,
+ Created: i.Created,
+ LastTagTime: lastTagTime,
+ })
+ }
+ json, err := formatter.ToJSON(imageList, "", "")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(dockerCli.Out(), json)
+ return err
+ }
- return formatter.Print(images, formatter.PRETTY, os.Stdout,
+ return formatter.Print(images, opts.Format, dockerCli.Out(),
func(w io.Writer) {
- for _, img := range images {
+ for _, container := range slices.Sorted(maps.Keys(images)) {
+ img := images[container]
id := stringid.TruncateID(img.ID)
size := units.HumanSizeWithPrecision(float64(img.Size), 3)
repo := img.Repository
@@ -101,8 +141,13 @@ func runImages(ctx context.Context, backend api.Service, opts imageOptions, serv
if tag == "" {
tag = "<none>"
}
- _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", img.ContainerName, repo, tag, id, size)
+ created := "N/A"
+ if img.Created != nil {
+ created = units.HumanDuration(time.Now().UTC().Sub(*img.Created)) + " ago"
+ }
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
+ container, repo, tag, platforms.Format(img.Platform), id, size, created)
}
},
- "Container", "Repository", "Tag", "Image Id", "Size")
+ "CONTAINER", "REPOSITORY", "TAG", "PLATFORM", "IMAGE ID", "SIZE", "CREATED")
}
diff --git a/cmd/compose/kill.go b/cmd/compose/kill.go
index eb17f1f1cb5..8c8a0a27b33 100644
--- a/cmd/compose/kill.go
+++ b/cmd/compose/kill.go
@@ -18,45 +18,64 @@ package compose
import (
"context"
+ "errors"
+ "fmt"
+ "os"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
type killOptions struct {
- *projectOptions
- signal string
+ *ProjectOptions
+ removeOrphans bool
+ signal string
}
-func killCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func killCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := killOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "kill [options] [SERVICE...]",
- Short: "Force stop service containers.",
+ Use: "kill [OPTIONS] [SERVICE...]",
+ Short: "Force stop service containers",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runKill(ctx, backend, opts, args)
+ return runKill(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := cmd.Flags()
- flags.StringVarP(&opts.signal, "signal", "s", "SIGKILL", "SIGNAL to send to the container.")
+ removeOrphans := utils.StringToBool(os.Getenv(ComposeRemoveOrphans))
+ flags.BoolVar(&opts.removeOrphans, "remove-orphans", removeOrphans, "Remove containers for services not defined in the Compose file")
+ flags.StringVarP(&opts.signal, "signal", "s", "SIGKILL", "SIGNAL to send to the container")
return cmd
}
-func runKill(ctx context.Context, backend api.Service, opts killOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runKill(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts killOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
- return backend.Kill(ctx, projectName, api.KillOptions{
- Services: services,
- Signal: opts.signal,
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ err = backend.Kill(ctx, name, api.KillOptions{
+ RemoveOrphans: opts.removeOrphans,
+ Project: project,
+ Services: services,
+ Signal: opts.signal,
})
-
+ if errors.Is(err, api.ErrNoResources) {
+ _, _ = fmt.Fprintln(stdinfo(dockerCli), "No container to kill")
+ return nil
+ }
+ return err
}
diff --git a/cmd/compose/list.go b/cmd/compose/list.go
index ac05dc1d129..d7ce49b9ae8 100644
--- a/cmd/compose/list.go
+++ b/cmd/compose/list.go
@@ -20,15 +20,16 @@ import (
"context"
"fmt"
"io"
- "os"
"strings"
- "github.com/docker/compose/v2/cmd/formatter"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/docker/cli/opts"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type lsOptions struct {
@@ -38,20 +39,21 @@ type lsOptions struct {
Filter opts.FilterOpt
}
-func listCommand(backend api.Service) *cobra.Command {
- opts := lsOptions{Filter: opts.NewFilterOpt()}
+func listCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ lsOpts := lsOptions{Filter: opts.NewFilterOpt()}
lsCmd := &cobra.Command{
- Use: "ls",
+ Use: "ls [OPTIONS]",
Short: "List running compose projects",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runList(ctx, backend, opts)
+ return runList(ctx, dockerCli, backendOptions, lsOpts)
}),
+ Args: cobra.NoArgs,
ValidArgsFunction: noCompletion(),
}
- lsCmd.Flags().StringVar(&opts.Format, "format", "pretty", "Format the output. Values: [pretty | json].")
- lsCmd.Flags().BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs.")
- lsCmd.Flags().Var(&opts.Filter, "filter", "Filter output based on conditions provided.")
- lsCmd.Flags().BoolVarP(&opts.All, "all", "a", false, "Show all stopped Compose projects")
+ lsCmd.Flags().StringVar(&lsOpts.Format, "format", "table", "Format the output. Values: [table | json]")
+ lsCmd.Flags().BoolVarP(&lsOpts.Quiet, "quiet", "q", false, "Only display project names")
+ lsCmd.Flags().Var(&lsOpts.Filter, "filter", "Filter output based on conditions provided")
+ lsCmd.Flags().BoolVarP(&lsOpts.All, "all", "a", false, "Show all stopped Compose projects")
return lsCmd
}
@@ -60,22 +62,20 @@ var acceptedListFilters = map[string]bool{
"name": true,
}
-func runList(ctx context.Context, backend api.Service, opts lsOptions) error {
- filters := opts.Filter.Value()
+func runList(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, lsOpts lsOptions) error {
+ filters := lsOpts.Filter.Value()
err := filters.Validate(acceptedListFilters)
if err != nil {
return err
}
- stackList, err := backend.List(ctx, api.ListOptions{All: opts.All})
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
- if opts.Quiet {
- for _, s := range stackList {
- fmt.Println(s.Name)
- }
- return nil
+ stackList, err := backend.List(ctx, api.ListOptions{All: lsOpts.All})
+ if err != nil {
+ return err
}
if filters.Len() > 0 {
@@ -89,8 +89,15 @@ func runList(ctx context.Context, backend api.Service, opts lsOptions) error {
stackList = filtered
}
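+ // --quiet output is produced after filtering so it only lists matching projects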
+ if lsOpts.Quiet {
+ for _, s := range stackList {
+ _, _ = fmt.Fprintln(dockerCli.Out(), s.Name)
+ }
+ return nil
+ }
+
view := viewFromStackList(stackList)
- return formatter.Print(view, opts.Format, os.Stdout, func(w io.Writer) {
+ return formatter.Print(view, lsOpts.Format, dockerCli.Out(), func(w io.Writer) {
for _, stack := range view {
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", stack.Name, stack.Status, stack.ConfigFiles)
}
diff --git a/cmd/compose/logs.go b/cmd/compose/logs.go
index ce094698af2..185b82a14a5 100644
--- a/cmd/compose/logs.go
+++ b/cmd/compose/logs.go
@@ -18,19 +18,21 @@ package compose
import (
"context"
- "os"
-
- "github.com/docker/compose/v2/cmd/formatter"
+ "errors"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/api"
)
type logsOptions struct {
- *projectOptions
+ *ProjectOptions
composeOptions
follow bool
+ index int
tail string
since string
until string
@@ -39,41 +41,93 @@ type logsOptions struct {
timestamps bool
}
-func logsCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func logsCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := logsOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
logsCmd := &cobra.Command{
- Use: "logs [SERVICE...]",
+ Use: "logs [OPTIONS] [SERVICE...]",
Short: "View output from containers",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runLogs(ctx, backend, opts, args)
+ return runLogs(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ if opts.index > 0 && len(args) != 1 {
+ return errors.New("--index requires one service to be selected")
+ }
+ return nil
+ },
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := logsCmd.Flags()
- flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output.")
+ flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output")
+ flags.IntVar(&opts.index, "index", 0, "Index of the container if service has multiple replicas")
flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)")
flags.StringVar(&opts.until, "until", "", "Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)")
- flags.BoolVar(&opts.noColor, "no-color", false, "Produce monochrome output.")
- flags.BoolVar(&opts.noPrefix, "no-log-prefix", false, "Don't print prefix in logs.")
- flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps.")
- flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs for each container.")
+ flags.BoolVar(&opts.noColor, "no-color", false, "Produce monochrome output")
+ flags.BoolVar(&opts.noPrefix, "no-log-prefix", false, "Don't print prefix in logs")
+ flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps")
+ flags.StringVarP(&opts.tail, "tail", "n", "all", "Number of lines to show from the end of the logs for each container")
return logsCmd
}
-func runLogs(ctx context.Context, backend api.Service, opts logsOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runLogs(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts logsOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
+ if err != nil {
+ return err
+ }
+
+ // exclude services configured to ignore output (attach: false), until explicitly selected
+ if project != nil && len(services) == 0 {
+ for n, service := range project.Services {
+ if service.Attach == nil || *service.Attach {
+ services = append(services, n)
+ }
+ }
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
- consumer := formatter.NewLogConsumer(ctx, os.Stdout, !opts.noColor, !opts.noPrefix)
- return backend.Logs(ctx, projectName, consumer, api.LogOptions{
+ consumer := formatter.NewLogConsumer(ctx, dockerCli.Out(), dockerCli.Err(), !opts.noColor, !opts.noPrefix, false)
+ return backend.Logs(ctx, name, consumer, api.LogOptions{
+ Project: project,
Services: services,
Follow: opts.follow,
+ Index: opts.index,
Tail: opts.tail,
Since: opts.since,
Until: opts.until,
Timestamps: opts.timestamps,
})
}
+
+var _ api.LogConsumer = &logConsumer{}
+
+type logConsumer struct {
+ events api.EventProcessor
+}
+
+func (l logConsumer) Log(containerName, message string) {
+ l.events.On(api.Resource{
+ ID: containerName,
+ Text: message,
+ })
+}
+
+func (l logConsumer) Err(containerName, message string) {
+ l.events.On(api.Resource{
+ ID: containerName,
+ Status: api.Error,
+ Text: message,
+ })
+}
+
+func (l logConsumer) Status(containerName, message string) {
+ l.events.On(api.Resource{
+ ID: containerName,
+ Status: api.Error,
+ Text: message,
+ })
+}
diff --git a/cmd/compose/options.go b/cmd/compose/options.go
new file mode 100644
index 00000000000..f243bd6f58b
--- /dev/null
+++ b/cmd/compose/options.go
@@ -0,0 +1,291 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "slices"
+ "sort"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/template"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/display"
+ "github.com/docker/compose/v5/cmd/prompt"
+ "github.com/docker/compose/v5/internal/tracing"
+)
+
+func applyPlatforms(project *types.Project, buildForSinglePlatform bool) error {
+ defaultPlatform := project.Environment["DOCKER_DEFAULT_PLATFORM"]
+ for name, service := range project.Services {
+ if service.Build == nil {
+ continue
+ }
+
+ // default platform only applies if the service doesn't specify
+ if defaultPlatform != "" && service.Platform == "" {
+ if len(service.Build.Platforms) > 0 && !slices.Contains(service.Build.Platforms, defaultPlatform) {
+ return fmt.Errorf("service %q build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: %s", name, defaultPlatform)
+ }
+ service.Platform = defaultPlatform
+ }
+
+ if service.Platform != "" {
+ if len(service.Build.Platforms) > 0 {
+ if !slices.Contains(service.Build.Platforms, service.Platform) {
+ return fmt.Errorf("service %q build configuration does not support platform: %s", name, service.Platform)
+ }
+ }
+
+ if buildForSinglePlatform || len(service.Build.Platforms) == 0 {
+ // if we're building for a single platform, we want to build for the platform we'll use to run the image
+ // similarly, if no build platforms were explicitly specified, it makes sense to build for the platform
+ // the image is designed for rather than allowing the builder to infer the platform
+ service.Build.Platforms = []string{service.Platform}
+ }
+ }
+
+ // services can specify that they should be built for multiple platforms, which can be used
+ // with `docker compose build` to produce a multi-arch image
+ // other cases, such as `up` and `run`, need a single architecture to actually run
+ // if there is only a single platform present (which might have been inferred
+ // from service.Platform above), it will be used, even if it requires emulation.
+ // if there's more than one platform, then the list is cleared so that the builder
+ // can decide.
+ // TODO(milas): there's no validation that the platform the builder will pick is actually one
+ // of the supported platforms from the build definition
+ // e.g. `build.platforms: [linux/arm64, linux/amd64]` on a `linux/ppc64` machine would build
+ // for `linux/ppc64` instead of returning an error that it's not a valid platform for the service.
+ if buildForSinglePlatform && len(service.Build.Platforms) > 1 {
+ // empty indicates that the builder gets to decide
+ service.Build.Platforms = nil
+ }
+ project.Services[name] = service
+ }
+ return nil
+}
+
+// isRemoteConfig checks if the main compose file is from a remote source (OCI or Git)
+func isRemoteConfig(dockerCli command.Cli, options buildOptions) bool {
+ if len(options.ConfigPaths) == 0 {
+ return false
+ }
+ remoteLoaders := options.remoteLoaders(dockerCli)
+ for _, loader := range remoteLoaders {
+ if loader.Accept(options.ConfigPaths[0]) {
+ return true
+ }
+ }
+ return false
+}
+
+// checksForRemoteStack handles environment variable prompts for remote configurations
+func checksForRemoteStack(ctx context.Context, dockerCli command.Cli, project *types.Project, options buildOptions, assumeYes bool, cmdEnvs []string) error {
+ if !isRemoteConfig(dockerCli, options) {
+ return nil
+ }
+ if metrics, ok := ctx.Value(tracing.MetricsKey{}).(tracing.Metrics); ok && metrics.CountIncludesRemote > 0 {
+ if err := confirmRemoteIncludes(dockerCli, options, assumeYes); err != nil {
+ return err
+ }
+ }
+ displayLocationRemoteStack(dockerCli, project, options)
+ return promptForInterpolatedVariables(ctx, dockerCli, options.ProjectOptions, assumeYes, cmdEnvs)
+}
+
+// Prepare the values map and collect all variables info
+type varInfo struct {
+ name string
+ value string
+ source string
+ required bool
+ defaultValue string
+}
+
+// promptForInterpolatedVariables displays all variables and their values at once,
+// then prompts for confirmation
+func promptForInterpolatedVariables(ctx context.Context, dockerCli command.Cli, projectOptions *ProjectOptions, assumeYes bool, cmdEnvs []string) error {
+ if assumeYes {
+ return nil
+ }
+
+ varsInfo, noVariables, err := extractInterpolationVariablesFromModel(ctx, dockerCli, projectOptions, cmdEnvs)
+ if err != nil {
+ return err
+ }
+
+ if noVariables {
+ return nil
+ }
+
+ displayInterpolationVariables(dockerCli.Out(), varsInfo)
+
+ // Prompt for confirmation
+ userInput := prompt.NewPrompt(dockerCli.In(), dockerCli.Out())
+ msg := "\nDo you want to proceed with these variables? [Y/n]: "
+ confirmed, err := userInput.Confirm(msg, true)
+ if err != nil {
+ return err
+ }
+
+ if !confirmed {
+ return fmt.Errorf("operation cancelled by user")
+ }
+
+ return nil
+}
+
+func extractInterpolationVariablesFromModel(ctx context.Context, dockerCli command.Cli, projectOptions *ProjectOptions, cmdEnvs []string) ([]varInfo, bool, error) {
+ cmdEnvMap := extractEnvCLIDefined(cmdEnvs)
+
+ // Create a model without interpolation to extract variables
+ opts := configOptions{
+ noInterpolate: true,
+ ProjectOptions: projectOptions,
+ }
+
+ model, err := opts.ToModel(ctx, dockerCli, nil, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return nil, false, err
+ }
+
+ // Extract variables that need interpolation
+ variables := template.ExtractVariables(model, template.DefaultPattern)
+ if len(variables) == 0 {
+ return nil, true, nil
+ }
+
+ var varsInfo []varInfo
+ proposedValues := make(map[string]string)
+
+ for name, variable := range variables {
+ info := varInfo{
+ name: name,
+ required: variable.Required,
+ defaultValue: variable.DefaultValue,
+ }
+
+ // Determine value and source based on priority
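+ // (command-line overrides win over the process environment, which wins over compose-file defaults)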
+ if value, exists := cmdEnvMap[name]; exists {
+ info.value = value
+ info.source = "command-line"
+ proposedValues[name] = value
+ } else if value, exists := os.LookupEnv(name); exists {
+ info.value = value
+ info.source = "environment"
+ proposedValues[name] = value
+ } else if variable.DefaultValue != "" {
+ info.value = variable.DefaultValue
+ info.source = "compose file"
+ proposedValues[name] = variable.DefaultValue
+ } else {
+ info.value = ""
+ info.source = "none"
+ }
+
+ varsInfo = append(varsInfo, info)
+ }
+ return varsInfo, false, nil
+}
+
+func extractEnvCLIDefined(cmdEnvs []string) map[string]string {
+ // Parse command-line environment variables
+ cmdEnvMap := make(map[string]string)
+ for _, env := range cmdEnvs {
+ parts := strings.SplitN(env, "=", 2)
+ if len(parts) == 2 {
+ cmdEnvMap[parts[0]] = parts[1]
+ }
+ }
+ return cmdEnvMap
+}
+
+func displayInterpolationVariables(writer io.Writer, varsInfo []varInfo) {
+ // Display all variables in a table format
+ _, _ = fmt.Fprintln(writer, "\nFound the following variables in configuration:")
+
+ w := tabwriter.NewWriter(writer, 0, 0, 3, ' ', 0)
+ _, _ = fmt.Fprintln(w, "VARIABLE\tVALUE\tSOURCE\tREQUIRED\tDEFAULT")
+ sort.Slice(varsInfo, func(a, b int) bool {
+ return varsInfo[a].name < varsInfo[b].name
+ })
+ for _, info := range varsInfo {
+ required := "no"
+ if info.required {
+ required = "yes"
+ }
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+ info.name,
+ info.value,
+ info.source,
+ required,
+ info.defaultValue,
+ )
+ }
+ _ = w.Flush()
+}
+
+func displayLocationRemoteStack(dockerCli command.Cli, project *types.Project, options buildOptions) {
+ mainComposeFile := options.ProjectOptions.ConfigPaths[0] //nolint:staticcheck
+ if display.Mode != display.ModeQuiet && display.Mode != display.ModeJSON {
+ _, _ = fmt.Fprintf(dockerCli.Out(), "Your compose stack %q is stored in %q\n", mainComposeFile, project.WorkingDir)
+ }
+}
+
+func confirmRemoteIncludes(dockerCli command.Cli, options buildOptions, assumeYes bool) error {
+ if assumeYes {
+ return nil
+ }
+
+ var remoteIncludes []string
+ remoteLoaders := options.ProjectOptions.remoteLoaders(dockerCli) //nolint:staticcheck
+ for _, cf := range options.ProjectOptions.ConfigPaths { //nolint:staticcheck
+ for _, loader := range remoteLoaders {
+ if loader.Accept(cf) {
+ remoteIncludes = append(remoteIncludes, cf)
+ break
+ }
+ }
+ }
+
+ if len(remoteIncludes) == 0 {
+ return nil
+ }
+
+ _, _ = fmt.Fprintln(dockerCli.Out(), "\nWarning: This Compose project includes files from remote sources:")
+ for _, include := range remoteIncludes {
+ _, _ = fmt.Fprintf(dockerCli.Out(), " - %s\n", include)
+ }
+ _, _ = fmt.Fprintln(dockerCli.Out(), "\nRemote includes could potentially be malicious. Make sure you trust the source.")
+
+ msg := "Do you want to continue? [y/N]: "
+ confirmed, err := prompt.NewPrompt(dockerCli.In(), dockerCli.Out()).Confirm(msg, false)
+ if err != nil {
+ return err
+ }
+ if !confirmed {
+ return fmt.Errorf("operation cancelled by user")
+ }
+
+ return nil
+}
diff --git a/cmd/compose/options_test.go b/cmd/compose/options_test.go
new file mode 100644
index 00000000000..cac2f830230
--- /dev/null
+++ b/cmd/compose/options_test.go
@@ -0,0 +1,394 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/compose/v5/pkg/mocks"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+func TestApplyPlatforms_InferFromRuntime(t *testing.T) {
+ makeProject := func() *types.Project {
+ return &types.Project{
+ Services: types.Services{
+ "test": {
+ Name: "test",
+ Image: "foo",
+ Build: &types.BuildConfig{
+ Context: ".",
+ Platforms: []string{
+ "linux/amd64",
+ "linux/arm64",
+ "alice/32",
+ },
+ },
+ Platform: "alice/32",
+ },
+ },
+ }
+ }
+
+ t.Run("SinglePlatform", func(t *testing.T) {
+ project := makeProject()
+ require.NoError(t, applyPlatforms(project, true))
+ require.EqualValues(t, []string{"alice/32"}, project.Services["test"].Build.Platforms)
+ })
+
+ t.Run("MultiPlatform", func(t *testing.T) {
+ project := makeProject()
+ require.NoError(t, applyPlatforms(project, false))
+ require.EqualValues(t, []string{"linux/amd64", "linux/arm64", "alice/32"},
+ project.Services["test"].Build.Platforms)
+ })
+}
+
+func TestApplyPlatforms_DockerDefaultPlatform(t *testing.T) {
+ makeProject := func() *types.Project {
+ return &types.Project{
+ Environment: map[string]string{
+ "DOCKER_DEFAULT_PLATFORM": "linux/amd64",
+ },
+ Services: types.Services{
+ "test": {
+ Name: "test",
+ Image: "foo",
+ Build: &types.BuildConfig{
+ Context: ".",
+ Platforms: []string{
+ "linux/amd64",
+ "linux/arm64",
+ },
+ },
+ },
+ },
+ }
+ }
+
+ t.Run("SinglePlatform", func(t *testing.T) {
+ project := makeProject()
+ require.NoError(t, applyPlatforms(project, true))
+ require.EqualValues(t, []string{"linux/amd64"}, project.Services["test"].Build.Platforms)
+ })
+
+ t.Run("MultiPlatform", func(t *testing.T) {
+ project := makeProject()
+ require.NoError(t, applyPlatforms(project, false))
+ require.EqualValues(t, []string{"linux/amd64", "linux/arm64"},
+ project.Services["test"].Build.Platforms)
+ })
+}
+
+func TestApplyPlatforms_UnsupportedPlatform(t *testing.T) {
+ makeProject := func() *types.Project {
+ return &types.Project{
+ Environment: map[string]string{
+ "DOCKER_DEFAULT_PLATFORM": "commodore/64",
+ },
+ Services: types.Services{
+ "test": {
+ Name: "test",
+ Image: "foo",
+ Build: &types.BuildConfig{
+ Context: ".",
+ Platforms: []string{
+ "linux/amd64",
+ "linux/arm64",
+ },
+ },
+ },
+ },
+ }
+ }
+
+ t.Run("SinglePlatform", func(t *testing.T) {
+ project := makeProject()
+ require.EqualError(t, applyPlatforms(project, true),
+ `service "test" build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: commodore/64`)
+ })
+
+ t.Run("MultiPlatform", func(t *testing.T) {
+ project := makeProject()
+ require.EqualError(t, applyPlatforms(project, false),
+ `service "test" build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: commodore/64`)
+ })
+}
+
+func TestIsRemoteConfig(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ cli := mocks.NewMockCli(ctrl)
+
+ tests := []struct {
+ name string
+ configPaths []string
+ want bool
+ }{
+ {
+ name: "empty config paths",
+ configPaths: []string{},
+ want: false,
+ },
+ {
+ name: "local file",
+ configPaths: []string{"docker-compose.yaml"},
+ want: false,
+ },
+ {
+ name: "OCI reference",
+ configPaths: []string{"oci://registry.example.com/stack:latest"},
+ want: true,
+ },
+ {
+ name: "GIT reference",
+ configPaths: []string{"git://github.com/user/repo.git"},
+ want: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ opts := buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: tt.configPaths,
+ },
+ }
+ got := isRemoteConfig(cli, opts)
+ require.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestDisplayLocationRemoteStack(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ cli := mocks.NewMockCli(ctrl)
+
+ buf := new(bytes.Buffer)
+ cli.EXPECT().Out().Return(streams.NewOut(buf)).AnyTimes()
+
+ project := &types.Project{
+ Name: "test-project",
+ WorkingDir: "/tmp/test",
+ }
+
+ options := buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: []string{"oci://registry.example.com/stack:latest"},
+ },
+ }
+
+ displayLocationRemoteStack(cli, project, options)
+
+ output := buf.String()
+ require.Equal(t, output, fmt.Sprintf("Your compose stack %q is stored in %q\n", "oci://registry.example.com/stack:latest", "/tmp/test"))
+}
+
+func TestDisplayInterpolationVariables(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ // Create a temporary directory for the test
+ tmpDir, err := os.MkdirTemp("", "compose-test")
+ require.NoError(t, err)
+ defer func() { _ = os.RemoveAll(tmpDir) }()
+
+ // Create a temporary compose file
+ composeContent := `
+services:
+ app:
+ image: nginx
+ environment:
+ - TEST_VAR=${TEST_VAR:?required} # required with default
+ - API_KEY=${API_KEY:?} # required without default
+ - DEBUG=${DEBUG:-true} # optional with default
+ - UNSET_VAR # optional without default
+`
+ composePath := filepath.Join(tmpDir, "docker-compose.yml")
+ err = os.WriteFile(composePath, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ buf := new(bytes.Buffer)
+ cli := mocks.NewMockCli(ctrl)
+ cli.EXPECT().Out().Return(streams.NewOut(buf)).AnyTimes()
+
+ // Create ProjectOptions with the temporary compose file
+ projectOptions := &ProjectOptions{
+ ConfigPaths: []string{composePath},
+ }
+
+ // Set up the context with necessary environment variables
+ ctx := context.Background()
+ _ = os.Setenv("TEST_VAR", "test-value")
+ _ = os.Setenv("API_KEY", "123456")
+ defer func() {
+ _ = os.Unsetenv("TEST_VAR")
+ _ = os.Unsetenv("API_KEY")
+ }()
+
+ // Extract variables from the model
+ info, noVariables, err := extractInterpolationVariablesFromModel(ctx, cli, projectOptions, []string{})
+ require.NoError(t, err)
+ require.False(t, noVariables)
+
+ // Display the variables
+ displayInterpolationVariables(cli.Out(), info)
+
+ // Expected output format with proper spacing
+ expected := "\nFound the following variables in configuration:\n" +
+ "VARIABLE VALUE SOURCE REQUIRED DEFAULT\n" +
+ "API_KEY 123456 environment yes \n" +
+ "DEBUG true compose file no true\n" +
+ "TEST_VAR test-value environment yes \n"
+
+ // Normalize spaces and newlines for comparison
+ normalizeSpaces := func(s string) string {
+ // Replace multiple spaces with a single space
+ s = strings.Join(strings.Fields(strings.TrimSpace(s)), " ")
+ return s
+ }
+
+ actualOutput := buf.String()
+
+ // Compare normalized strings
+ require.Equal(t,
+ normalizeSpaces(expected),
+ normalizeSpaces(actualOutput),
+ "\nExpected:\n%s\nGot:\n%s", expected, actualOutput)
+}
+
+func TestConfirmRemoteIncludes(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ cli := mocks.NewMockCli(ctrl)
+
+ tests := []struct {
+ name string
+ opts buildOptions
+ assumeYes bool
+ userInput string
+ wantErr bool
+ errMessage string
+ wantPrompt bool
+ wantOutput string
+ }{
+ {
+ name: "no remote includes",
+ opts: buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: []string{
+ "docker-compose.yaml",
+ "./local/path/compose.yaml",
+ },
+ },
+ },
+ assumeYes: false,
+ wantErr: false,
+ wantPrompt: false,
+ },
+ {
+ name: "assume yes with remote includes",
+ opts: buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: []string{
+ "oci://registry.example.com/stack:latest",
+ "git://github.com/user/repo.git",
+ },
+ },
+ },
+ assumeYes: true,
+ wantErr: false,
+ wantPrompt: false,
+ },
+ {
+ name: "user confirms remote includes",
+ opts: buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: []string{
+ "oci://registry.example.com/stack:latest",
+ "git://github.com/user/repo.git",
+ },
+ },
+ },
+ assumeYes: false,
+ userInput: "y\n",
+ wantErr: false,
+ wantPrompt: true,
+ wantOutput: "\nWarning: This Compose project includes files from remote sources:\n" +
+ " - oci://registry.example.com/stack:latest\n" +
+ " - git://github.com/user/repo.git\n" +
+ "\nRemote includes could potentially be malicious. Make sure you trust the source.\n" +
+ "Do you want to continue? [y/N]: ",
+ },
+ {
+ name: "user rejects remote includes",
+ opts: buildOptions{
+ ProjectOptions: &ProjectOptions{
+ ConfigPaths: []string{
+ "oci://registry.example.com/stack:latest",
+ },
+ },
+ },
+ assumeYes: false,
+ userInput: "n\n",
+ wantErr: true,
+ errMessage: "operation cancelled by user",
+ wantPrompt: true,
+ wantOutput: "\nWarning: This Compose project includes files from remote sources:\n" +
+ " - oci://registry.example.com/stack:latest\n" +
+ "\nRemote includes could potentially be malicious. Make sure you trust the source.\n" +
+ "Do you want to continue? [y/N]: ",
+ },
+ }
+
+ buf := new(bytes.Buffer)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cli.EXPECT().Out().Return(streams.NewOut(buf)).AnyTimes()
+
+ if tt.wantPrompt {
+ inbuf := io.NopCloser(bytes.NewBufferString(tt.userInput))
+ cli.EXPECT().In().Return(streams.NewIn(inbuf)).AnyTimes()
+ }
+
+ err := confirmRemoteIncludes(cli, tt.opts, tt.assumeYes)
+
+ if tt.wantErr {
+ require.Error(t, err)
+ require.Equal(t, tt.errMessage, err.Error())
+ } else {
+ require.NoError(t, err)
+ }
+
+ if tt.wantOutput != "" {
+ require.Equal(t, tt.wantOutput, buf.String())
+ }
+ buf.Reset()
+ })
+ }
+}
diff --git a/cmd/compose/pause.go b/cmd/compose/pause.go
index f51f47c7a60..b7e00b7804e 100644
--- a/cmd/compose/pause.go
+++ b/cmd/compose/pause.go
@@ -19,67 +19,79 @@ package compose
import (
"context"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type pauseOptions struct {
- *projectOptions
+ *ProjectOptions
}
-func pauseCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func pauseCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := pauseOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
Use: "pause [SERVICE...]",
Short: "Pause services",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runPause(ctx, backend, opts, args)
+ return runPause(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
return cmd
}
-func runPause(ctx context.Context, backend api.Service, opts pauseOptions, services []string) error {
- project, err := opts.toProjectName()
+func runPause(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts pauseOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
- return backend.Pause(ctx, project, api.PauseOptions{
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Pause(ctx, name, api.PauseOptions{
Services: services,
+ Project: project,
})
}
type unpauseOptions struct {
- *projectOptions
+ *ProjectOptions
}
-func unpauseCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func unpauseCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := unpauseOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
Use: "unpause [SERVICE...]",
Short: "Unpause services",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runUnPause(ctx, backend, opts, args)
+ return runUnPause(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
return cmd
}
-func runUnPause(ctx context.Context, backend api.Service, opts unpauseOptions, services []string) error {
- project, err := opts.toProjectName()
+func runUnPause(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts unpauseOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
- return backend.UnPause(ctx, project, api.PauseOptions{
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.UnPause(ctx, name, api.PauseOptions{
Services: services,
+ Project: project,
})
}
diff --git a/cmd/compose/port.go b/cmd/compose/port.go
index 6ffd6d1f93f..a117926fd19 100644
--- a/cmd/compose/port.go
+++ b/cmd/compose/port.go
@@ -20,47 +20,56 @@ import (
"context"
"fmt"
"strconv"
+ "strings"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type portOptions struct {
- *projectOptions
- port int
+ *ProjectOptions
+ port uint16
protocol string
index int
}
-func portCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func portCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := portOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "port [options] [--] SERVICE PRIVATE_PORT",
- Short: "Print the public port for a port binding.",
+ Use: "port [OPTIONS] SERVICE PRIVATE_PORT",
+ Short: "Print the public port for a port binding",
Args: cobra.MinimumNArgs(2),
PreRunE: Adapt(func(ctx context.Context, args []string) error {
- port, err := strconv.Atoi(args[1])
+ port, err := strconv.ParseUint(args[1], 10, 16)
if err != nil {
return err
}
- opts.port = port
+ opts.port = uint16(port)
+ opts.protocol = strings.ToLower(opts.protocol)
return nil
}),
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runPort(ctx, backend, opts, args[0])
+ return runPort(ctx, dockerCli, backendOptions, opts, args[0])
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
cmd.Flags().StringVar(&opts.protocol, "protocol", "tcp", "tcp or udp")
- cmd.Flags().IntVar(&opts.index, "index", 1, "index of the container if service has multiple replicas")
+ cmd.Flags().IntVar(&opts.index, "index", 0, "Index of the container if service has multiple replicas")
return cmd
}
-func runPort(ctx context.Context, backend api.Service, opts portOptions, service string) error {
- projectName, err := opts.toProjectName()
+func runPort(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts portOptions, service string) error {
+ projectName, err := opts.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
@@ -72,6 +81,6 @@ func runPort(ctx context.Context, backend api.Service, opts portOptions, service
return err
}
- fmt.Printf("%s:%d\n", ip, port)
+ _, _ = fmt.Fprintf(dockerCli.Out(), "%s:%d\n", ip, port)
return nil
}
diff --git a/cmd/compose/ps.go b/cmd/compose/ps.go
index c395241adcc..4059774161b 100644
--- a/cmd/compose/ps.go
+++ b/cmd/compose/ps.go
@@ -18,31 +18,32 @@ package compose
import (
"context"
+ "errors"
"fmt"
- "io"
- "os"
+ "slices"
"sort"
- "strconv"
"strings"
- "github.com/docker/compose/v2/cmd/formatter"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
- formatter2 "github.com/docker/cli/cli/command/formatter"
- "github.com/pkg/errors"
+ "github.com/docker/cli/cli/command"
+ cliformatter "github.com/docker/cli/cli/command/formatter"
+ cliflags "github.com/docker/cli/cli/flags"
"github.com/spf13/cobra"
-
- "github.com/docker/compose/v2/pkg/api"
)
type psOptions struct {
- *projectOptions
+ *ProjectOptions
Format string
All bool
Quiet bool
Services bool
Filter string
Status []string
+ noTrunc bool
+ Orphans bool
}
func (p *psOptions) parseFilter() error {
@@ -64,54 +65,66 @@ func (p *psOptions) parseFilter() error {
return nil
}
-func psCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func psCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := psOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
psCmd := &cobra.Command{
- Use: "ps [SERVICE...]",
+ Use: "ps [OPTIONS] [SERVICE...]",
Short: "List containers",
PreRunE: func(cmd *cobra.Command, args []string) error {
return opts.parseFilter()
},
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runPs(ctx, backend, args, opts)
+ return runPs(ctx, dockerCli, backendOptions, args, opts)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := psCmd.Flags()
- flags.StringVar(&opts.Format, "format", "pretty", "Format the output. Values: [pretty | json]")
- flags.StringVar(&opts.Filter, "filter", "", "Filter services by a property (supported filters: status).")
+ flags.StringVar(&opts.Format, "format", "table", cliflags.FormatHelp)
+ flags.StringVar(&opts.Filter, "filter", "", "Filter services by a property (supported filters: status)")
flags.StringArrayVar(&opts.Status, "status", []string{}, "Filter services by status. Values: [paused | restarting | removing | running | dead | created | exited]")
flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs")
flags.BoolVar(&opts.Services, "services", false, "Display services")
+ flags.BoolVar(&opts.Orphans, "orphans", true, "Include orphaned services (not declared by project)")
flags.BoolVarP(&opts.All, "all", "a", false, "Show all stopped containers (including those created by the run command)")
+ flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
return psCmd
}
-func runPs(ctx context.Context, backend api.Service, services []string, opts psOptions) error {
- projectName, err := opts.toProjectName()
+func runPs(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, services []string, opts psOptions) error { //nolint:gocyclo
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
+ if err != nil {
+ return err
+ }
+
+ if project != nil {
+ names := project.ServiceNames()
+ if len(services) > 0 {
+ for _, service := range services {
+ if !slices.Contains(names, service) {
+ return fmt.Errorf("no such service: %s", service)
+ }
+ }
+ } else if !opts.Orphans {
+ // until user asks to list orphaned services, we only include those declared in project
+ services = names
+ }
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
- containers, err := backend.Ps(ctx, projectName, api.PsOptions{
- All: opts.All,
+ containers, err := backend.Ps(ctx, name, api.PsOptions{
+ Project: project,
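+ // include stopped containers whenever a --status filter is set, otherwise e.g. status=exited could never match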
+ All: opts.All || len(opts.Status) != 0,
Services: services,
})
if err != nil {
return err
}
-SERVICES:
- for _, s := range services {
- for _, c := range containers {
- if c.Service == s {
- continue SERVICES
- }
- }
- return fmt.Errorf("no such service: %s", s)
- }
-
if len(opts.Status) != 0 {
containers = filterByStatus(containers, opts.Status)
}
@@ -122,41 +135,33 @@ SERVICES:
if opts.Quiet {
for _, c := range containers {
- fmt.Println(c.ID)
+ _, _ = fmt.Fprintln(dockerCli.Out(), c.ID)
}
return nil
}
if opts.Services {
services := []string{}
- for _, s := range containers {
- if !utils.StringContains(services, s.Service) {
- services = append(services, s.Service)
+ for _, c := range containers {
+ s := c.Service
+ if !slices.Contains(services, s) {
+ services = append(services, s)
}
}
- fmt.Println(strings.Join(services, "\n"))
+ _, _ = fmt.Fprintln(dockerCli.Out(), strings.Join(services, "\n"))
return nil
}
- return formatter.Print(containers, opts.Format, os.Stdout,
- writter(containers),
- "NAME", "COMMAND", "SERVICE", "STATUS", "PORTS")
-}
+ if opts.Format == "" {
+ opts.Format = dockerCli.ConfigFile().PsFormat
+ }
-func writter(containers []api.ContainerSummary) func(w io.Writer) {
- return func(w io.Writer) {
- for _, container := range containers {
- ports := DisplayablePorts(container)
- status := container.State
- if status == "running" && container.Health != "" {
- status = fmt.Sprintf("%s (%s)", container.State, container.Health)
- } else if status == "exited" || status == "dead" {
- status = fmt.Sprintf("%s (%d)", container.State, container.ExitCode)
- }
- command := formatter2.Ellipsis(container.Command, 20)
- _, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", container.Name, strconv.Quote(command), container.Service, status, ports)
- }
+ containerCtx := cliformatter.Context{
+ Output: dockerCli.Out(),
+ Format: formatter.NewContainerFormat(opts.Format, opts.Quiet, false),
+ Trunc: !opts.noTrunc,
}
+ return formatter.ContainerWrite(containerCtx, containers)
}
func filterByStatus(containers []api.ContainerSummary, statuses []string) []api.ContainerSummary {
@@ -177,73 +182,3 @@ func hasStatus(c api.ContainerSummary, statuses []string) bool {
}
return false
}
-
-type portRange struct {
- pStart int
- pEnd int
- tStart int
- tEnd int
- IP string
- protocol string
-}
-
-func (pr portRange) String() string {
- var (
- pub string
- tgt string
- )
-
- if pr.pEnd > pr.pStart {
- pub = fmt.Sprintf("%s:%d-%d->", pr.IP, pr.pStart, pr.pEnd)
- } else if pr.pStart > 0 {
- pub = fmt.Sprintf("%s:%d->", pr.IP, pr.pStart)
- }
- if pr.tEnd > pr.tStart {
- tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd)
- } else {
- tgt = fmt.Sprintf("%d", pr.tStart)
- }
- return fmt.Sprintf("%s%s/%s", pub, tgt, pr.protocol)
-}
-
-// DisplayablePorts is copy pasted from https://github.com/docker/cli/pull/581/files
-func DisplayablePorts(c api.ContainerSummary) string {
- if c.Publishers == nil {
- return ""
- }
-
- sort.Sort(c.Publishers)
-
- pr := portRange{}
- ports := []string{}
- for _, p := range c.Publishers {
- prIsRange := pr.tEnd != pr.tStart
- tOverlaps := p.TargetPort <= pr.tEnd
-
- // Start a new port-range if:
- // - the protocol is different from the current port-range
- // - published or target port are not consecutive to the current port-range
- // - the current port-range is a _range_, and the target port overlaps with the current range's target-ports
- if p.Protocol != pr.protocol || p.URL != pr.IP || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps {
- // start a new port-range, and print the previous port-range (if any)
- if pr.pStart > 0 {
- ports = append(ports, pr.String())
- }
- pr = portRange{
- pStart: p.PublishedPort,
- pEnd: p.PublishedPort,
- tStart: p.TargetPort,
- tEnd: p.TargetPort,
- protocol: p.Protocol,
- IP: p.URL,
- }
- continue
- }
- pr.pEnd = p.PublishedPort
- pr.tEnd = p.TargetPort
- }
- if pr.tStart > 0 {
- ports = append(ports, pr.String())
- }
- return strings.Join(ports, ", ")
-}
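
The new ps path above filters by `--status` client-side, which is why `All` is forced whenever statuses are given. A minimal standalone sketch of that behavior (stdlib only; the containerSummary struct is a stand-in for the real api type, and the filter is a simplified equivalent of filterByStatus, not the compose code itself):

package main

import (
	"fmt"
	"slices"
)

// stand-in for api.ContainerSummary
type containerSummary struct {
	Name  string
	State string
}

// client-side filter, same shape as filterByStatus in the diff above
func filterByStatus(containers []containerSummary, statuses []string) []containerSummary {
	var filtered []containerSummary
	for _, c := range containers {
		if slices.Contains(statuses, c.State) {
			filtered = append(filtered, c)
		}
	}
	return filtered
}

func main() {
	all := []containerSummary{
		{Name: "app-web-1", State: "running"},
		{Name: "app-db-1", State: "exited"},
	}
	// If the listing had excluded stopped containers, "exited" could never
	// match; hence All is set whenever --status is used.
	fmt.Println(filterByStatus(all, []string{"exited"}))
}
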
diff --git a/cmd/compose/publish.go b/cmd/compose/publish.go
new file mode 100644
index 00000000000..45d5caf7b83
--- /dev/null
+++ b/cmd/compose/publish.go
@@ -0,0 +1,101 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "errors"
+
+ "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+type publishOptions struct {
+ *ProjectOptions
+ resolveImageDigests bool
+ ociVersion string
+ withEnvironment bool
+ assumeYes bool
+ app bool
+ insecureRegistry bool
+}
+
+func publishCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := publishOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "publish [OPTIONS] REPOSITORY[:TAG]",
+ Short: "Publish compose application",
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runPublish(ctx, dockerCli, backendOptions, opts, args[0])
+ }),
+ Args: cli.ExactArgs(1),
+ }
+ flags := cmd.Flags()
+ flags.BoolVar(&opts.resolveImageDigests, "resolve-image-digests", false, "Pin image tags to digests")
+ flags.StringVar(&opts.ociVersion, "oci-version", "", "OCI image/artifact specification version (automatically determined by default)")
+ flags.BoolVar(&opts.withEnvironment, "with-env", false, "Include environment variables in the published OCI artifact")
+ flags.BoolVarP(&opts.assumeYes, "yes", "y", false, `Assume "yes" as answer to all prompts`)
+ flags.BoolVar(&opts.app, "app", false, "Publish compose application (includes referenced images)")
+ flags.BoolVar(&opts.insecureRegistry, "insecure-registry", false, "Use insecure registry")
+ flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ // assumeYes was introduced by mistake as `--y`
+ if name == "y" {
+ logrus.Warn("--y is deprecated, please use --yes instead")
+ name = "yes"
+ }
+ return pflag.NormalizedName(name)
+ })
+ // Should **only** be used for testing purposes; we don't want to promote use of insecure registries
+ _ = flags.MarkHidden("insecure-registry")
+
+ return cmd
+}
+
+func runPublish(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts publishOptions, repository string) error {
+ if opts.assumeYes {
+ backendOptions.Options = append(backendOptions.Options, compose.WithPrompt(compose.AlwaysOkPrompt()))
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ project, metrics, err := opts.ToProject(ctx, dockerCli, backend, nil)
+ if err != nil {
+ return err
+ }
+
+ if metrics.CountIncludesLocal > 0 {
+ return errors.New("cannot publish compose file with local includes")
+ }
+
+ return backend.Publish(ctx, project, repository, api.PublishOptions{
+ ResolveImageDigests: opts.resolveImageDigests || opts.app,
+ Application: opts.app,
+ OCIVersion: api.OCIVersion(opts.ociVersion),
+ WithEnvironment: opts.withEnvironment,
+ InsecureRegistry: opts.insecureRegistry,
+ })
+}
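
The `--y`/`--yes` aliasing above relies on pflag's name-normalization hook. A small standalone sketch of that pattern (a hypothetical "demo" flag set, not the compose code):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ExitOnError)
	yes := flags.BoolP("yes", "y", false, `Assume "yes" as answer to all prompts`)
	flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
		if name == "y" {
			// legacy spelling introduced by mistake; rewrite to the canonical flag
			name = "yes"
		}
		return pflag.NormalizedName(name)
	})
	_ = flags.Parse([]string{"--y"})
	fmt.Println(*yes) // true: --y was normalized to --yes
}
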
diff --git a/cmd/compose/pull.go b/cmd/compose/pull.go
index 59e948ed8ae..c3ec6d52261 100644
--- a/cmd/compose/pull.go
+++ b/cmd/compose/pull.go
@@ -21,73 +21,102 @@ import (
"fmt"
"os"
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/morikuni/aec"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/pkg/api"
)
type pullOptions struct {
- *projectOptions
+ *ProjectOptions
composeOptions
quiet bool
parallel bool
noParallel bool
includeDeps bool
ignorePullFailures bool
+ noBuildable bool
+ policy string
}
-func pullCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func pullCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := pullOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "pull [SERVICE...]",
+ Use: "pull [OPTIONS] [SERVICE...]",
Short: "Pull service images",
- PreRunE: Adapt(func(ctx context.Context, args []string) error {
- if opts.noParallel {
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ if cmd.Flags().Changed("no-parallel") {
fmt.Fprint(os.Stderr, aec.Apply("option '--no-parallel' is DEPRECATED and will be ignored.\n", aec.RedF))
}
+ if cmd.Flags().Changed("parallel") {
+ fmt.Fprint(os.Stderr, aec.Apply("option '--parallel' is DEPRECATED and will be ignored.\n", aec.RedF))
+ }
return nil
- }),
+ },
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runPull(ctx, backend, opts, args)
+ return runPull(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := cmd.Flags()
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Pull without printing progress information")
cmd.Flags().BoolVar(&opts.includeDeps, "include-deps", false, "Also pull services declared as dependencies")
- cmd.Flags().BoolVar(&opts.parallel, "parallel", true, "DEPRECATED pull multiple images in parallel.")
+ cmd.Flags().BoolVar(&opts.parallel, "parallel", true, "DEPRECATED pull multiple images in parallel")
flags.MarkHidden("parallel") //nolint:errcheck
- cmd.Flags().BoolVar(&opts.parallel, "no-parallel", true, "DEPRECATED disable parallel pulling.")
+ cmd.Flags().BoolVar(&opts.noParallel, "no-parallel", true, "DEPRECATED disable parallel pulling")
flags.MarkHidden("no-parallel") //nolint:errcheck
cmd.Flags().BoolVar(&opts.ignorePullFailures, "ignore-pull-failures", false, "Pull what it can and ignores images with pull failures")
+ cmd.Flags().BoolVar(&opts.noBuildable, "ignore-buildable", false, "Ignore images that can be built")
+ cmd.Flags().StringVar(&opts.policy, "policy", "", `Apply pull policy ("missing"|"always")`)
return cmd
}
-func runPull(ctx context.Context, backend api.Service, opts pullOptions, services []string) error {
- project, err := opts.toProject(services)
- if err != nil {
- return err
- }
-
+func (opts pullOptions) apply(project *types.Project, services []string) (*types.Project, error) {
if !opts.includeDeps {
- enabled, err := project.GetServices(services...)
+ var err error
+ project, err = project.WithSelectedServices(services, types.IgnoreDependencies)
if err != nil {
- return err
+ return nil, err
}
- for _, s := range project.Services {
- if !utils.StringContains(services, s.Name) {
- project.DisabledServices = append(project.DisabledServices, s)
+ }
+
+ if opts.policy != "" {
+ for i, service := range project.Services {
+ if service.Image == "" {
+ continue
}
+ service.PullPolicy = opts.policy
+ project.Services[i] = service
}
- project.Services = enabled
+ }
+ return project, nil
+}
+
+func runPull(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts pullOptions, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ToProject(ctx, dockerCli, backend, services, cli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ project, err = opts.apply(project, services)
+ if err != nil {
+ return err
}
return backend.Pull(ctx, project, api.PullOptions{
- Quiet: opts.quiet,
- IgnoreFailures: opts.ignorePullFailures,
+ Quiet: opts.quiet,
+ IgnoreFailures: opts.ignorePullFailures,
+ IgnoreBuildable: opts.noBuildable,
})
}
diff --git a/cmd/compose/pullOptions_test.go b/cmd/compose/pullOptions_test.go
new file mode 100644
index 00000000000..05dd868edf7
--- /dev/null
+++ b/cmd/compose/pullOptions_test.go
@@ -0,0 +1,57 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "gotest.tools/v3/assert"
+)
+
+func TestApplyPullOptions(t *testing.T) {
+ project := &types.Project{
+ Services: types.Services{
+ "must-build": {
+ Name: "must-build",
+ // No image, local build only
+ Build: &types.BuildConfig{
+ Context: ".",
+ },
+ },
+ "has-build": {
+ Name: "has-build",
+ Image: "registry.example.com/myservice",
+ Build: &types.BuildConfig{
+ Context: ".",
+ },
+ },
+ "must-pull": {
+ Name: "must-pull",
+ Image: "registry.example.com/another-service",
+ },
+ },
+ }
+ project, err := pullOptions{
+ policy: types.PullPolicyMissing,
+ }.apply(project, nil)
+ assert.NilError(t, err)
+
+ assert.Equal(t, project.Services["must-build"].PullPolicy, "") // still default
+ assert.Equal(t, project.Services["has-build"].PullPolicy, types.PullPolicyMissing)
+ assert.Equal(t, project.Services["must-pull"].PullPolicy, types.PullPolicyMissing)
+}
diff --git a/cmd/compose/push.go b/cmd/compose/push.go
index 7e84e4c1129..39d85f8a8f8 100644
--- a/cmd/compose/push.go
+++ b/cmd/compose/push.go
@@ -19,42 +19,61 @@ package compose
import (
"context"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type pushOptions struct {
- *projectOptions
+ *ProjectOptions
composeOptions
-
+ IncludeDeps bool
Ignorefailures bool
+ Quiet bool
}
-func pushCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func pushCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := pushOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
pushCmd := &cobra.Command{
- Use: "push [SERVICE...]",
+ Use: "push [OPTIONS] [SERVICE...]",
Short: "Push service images",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runPush(ctx, backend, opts, args)
+ return runPush(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
pushCmd.Flags().BoolVar(&opts.Ignorefailures, "ignore-push-failures", false, "Push what it can and ignores images with push failures")
+ pushCmd.Flags().BoolVar(&opts.IncludeDeps, "include-deps", false, "Also push images of services declared as dependencies")
+ pushCmd.Flags().BoolVarP(&opts.Quiet, "quiet", "q", false, "Push without printing progress information")
return pushCmd
}
-func runPush(ctx context.Context, backend api.Service, opts pushOptions, services []string) error {
- project, err := opts.toProject(services)
+func runPush(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts pushOptions, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
+ project, _, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+
+ if !opts.IncludeDeps {
+ project, err = project.WithSelectedServices(services, types.IgnoreDependencies)
+ if err != nil {
+ return err
+ }
+ }
+
return backend.Push(ctx, project, api.PushOptions{
IgnoreFailures: opts.Ignorefailures,
+ Quiet: opts.Quiet,
})
}
diff --git a/cmd/compose/remove.go b/cmd/compose/remove.go
index 8f0bbd21f07..6501afb9fe5 100644
--- a/cmd/compose/remove.go
+++ b/cmd/compose/remove.go
@@ -18,24 +18,28 @@ package compose
import (
"context"
+ "errors"
+ "fmt"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
)
type removeOptions struct {
- *projectOptions
+ *ProjectOptions
force bool
stop bool
volumes bool
}
-func removeCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func removeCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := removeOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "rm [SERVICE...]",
+ Use: "rm [OPTIONS] [SERVICE...]",
Short: "Removes stopped service containers",
Long: `Removes stopped service containers
@@ -44,9 +48,9 @@ can override this with -v. To list all volumes, use "docker volume ls".
Any data which is not in a volume will be lost.`,
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runRemove(ctx, backend, opts, args)
+ return runRemove(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
f := cmd.Flags()
f.BoolVarP(&opts.force, "force", "f", false, "Don't ask to confirm removal")
@@ -58,24 +62,26 @@ Any data which is not in a volume will be lost.`,
return cmd
}
-func runRemove(ctx context.Context, backend api.Service, opts removeOptions, services []string) error {
- project, err := opts.toProjectName()
+func runRemove(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts removeOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
- if opts.stop {
- err := backend.Stop(ctx, project, api.StopOptions{
- Services: services,
- })
- if err != nil {
- return err
- }
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
}
-
- return backend.Remove(ctx, project, api.RemoveOptions{
+ err = backend.Remove(ctx, name, api.RemoveOptions{
Services: services,
Force: opts.force,
Volumes: opts.volumes,
+ Project: project,
+ Stop: opts.stop,
})
+ if errors.Is(err, api.ErrNoResources) {
+ _, _ = fmt.Fprintln(stdinfo(dockerCli), "No stopped containers")
+ return nil
+ }
+ return err
}
diff --git a/cmd/compose/restart.go b/cmd/compose/restart.go
index f691c31fb55..a2880d3c8e6 100644
--- a/cmd/compose/restart.go
+++ b/cmd/compose/restart.go
@@ -20,43 +20,69 @@ import (
"context"
"time"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type restartOptions struct {
- *projectOptions
- timeout int
+ *ProjectOptions
+ timeChanged bool
+ timeout int
+ noDeps bool
}
-func restartCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func restartCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := restartOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
restartCmd := &cobra.Command{
- Use: "restart",
- Short: "Restart containers",
+ Use: "restart [OPTIONS] [SERVICE...]",
+ Short: "Restart service containers",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ opts.timeChanged = cmd.Flags().Changed("timeout")
+ },
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runRestart(ctx, backend, opts, args)
+ return runRestart(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := restartCmd.Flags()
- flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
+ flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
+ flags.BoolVar(&opts.noDeps, "no-deps", false, "Don't restart dependent services")
return restartCmd
}
-func runRestart(ctx context.Context, backend api.Service, opts restartOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runRestart(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts restartOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli)
if err != nil {
return err
}
- timeout := time.Duration(opts.timeout) * time.Second
- return backend.Restart(ctx, projectName, api.RestartOptions{
- Timeout: &timeout,
+ if project != nil && len(services) > 0 {
+ project, err = project.WithServicesEnabled(services...)
+ if err != nil {
+ return err
+ }
+ }
+
+ var timeout *time.Duration
+ if opts.timeChanged {
+ timeoutValue := time.Duration(opts.timeout) * time.Second
+ timeout = &timeoutValue
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Restart(ctx, name, api.RestartOptions{
+ Timeout: timeout,
Services: services,
+ Project: project,
+ NoDeps: opts.noDeps,
})
}
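
Both restart and stop above only forward a shutdown timeout when `-t` was explicitly set, so a zero default is distinguishable from an explicit `-t 0`. A standalone sketch of that pattern (not compose code; the "demo" command is made up):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

func main() {
	var seconds int
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			var timeout *time.Duration
			if cmd.Flags().Changed("timeout") {
				d := time.Duration(seconds) * time.Second
				timeout = &d
			}
			if timeout != nil {
				fmt.Println("explicit timeout:", *timeout)
			} else {
				fmt.Println("no timeout requested, backend decides")
			}
			return nil
		},
	}
	cmd.Flags().IntVarP(&seconds, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
	cmd.SetArgs([]string{"-t", "0"})
	_ = cmd.Execute()
}
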
diff --git a/cmd/compose/run.go b/cmd/compose/run.go
index ef7e127db8e..e086a1f5570 100644
--- a/cmd/compose/run.go
+++ b/cmd/compose/run.go
@@ -19,19 +19,27 @@ package compose
import (
"context"
"fmt"
+ "os"
"strings"
- cgo "github.com/compose-spec/compose-go/cli"
- "github.com/compose-spec/compose-go/loader"
- "github.com/compose-spec/compose-go/types"
+ composecli "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/dotenv"
+ "github.com/compose-spec/compose-go/v2/format"
+ "github.com/docker/compose/v5/cmd/display"
+ "github.com/docker/compose/v5/pkg/compose"
+ xprogress "github.com/moby/buildkit/util/progress/progressui"
+ "github.com/sirupsen/logrus"
+
+ "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/cli/cli/command"
+ "github.com/docker/cli/opts"
"github.com/mattn/go-shellwords"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/docker/cli/cli"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
type runOptions struct {
@@ -39,6 +47,7 @@ type runOptions struct {
Service string
Command []string
environment []string
+ envFiles []string
Detach bool
Remove bool
noTty bool
@@ -47,6 +56,8 @@ type runOptions struct {
workdir string
entrypoint string
entrypointCmd []string
+ capAdd opts.ListOpts
+ capDrop opts.ListOpts
labels []string
volumes []string
publish []string
@@ -55,115 +66,195 @@ type runOptions struct {
name string
noDeps bool
ignoreOrphans bool
+ removeOrphans bool
+ quiet bool
quietPull bool
}
-func (opts runOptions) apply(project *types.Project) error {
- target, err := project.GetService(opts.Service)
- if err != nil {
- return err
+func (options runOptions) apply(project *types.Project) (*types.Project, error) {
+ if options.noDeps {
+ var err error
+ project, err = project.WithSelectedServices([]string{options.Service}, types.IgnoreDependencies)
+ if err != nil {
+ return nil, err
+ }
}
- target.Tty = !opts.noTty
- target.StdinOpen = opts.interactive
- if !opts.servicePorts {
- target.Ports = []types.ServicePortConfig{}
+ target, err := project.GetService(options.Service)
+ if err != nil {
+ return nil, err
}
- if len(opts.publish) > 0 {
+
+ target.Tty = !options.noTty
+ target.StdinOpen = options.interactive
+
+ // --service-ports and --publish are incompatible
+ if !options.servicePorts {
+ if len(target.Ports) > 0 {
+ logrus.Debug("Running service without ports exposed as --service-ports=false")
+ }
target.Ports = []types.ServicePortConfig{}
- for _, p := range opts.publish {
+ for _, p := range options.publish {
config, err := types.ParsePortConfig(p)
if err != nil {
- return err
+ return nil, err
}
target.Ports = append(target.Ports, config...)
}
}
- if len(opts.volumes) > 0 {
- for _, v := range opts.volumes {
- volume, err := loader.ParseVolume(v)
- if err != nil {
- return err
- }
- target.Volumes = append(target.Volumes, volume)
+
+ for _, v := range options.volumes {
+ volume, err := format.ParseVolume(v)
+ if err != nil {
+ return nil, err
}
+ target.Volumes = append(target.Volumes, volume)
}
- if opts.noDeps {
- for _, s := range project.Services {
- if s.Name != opts.Service {
- project.DisabledServices = append(project.DisabledServices, s)
- }
+ for name := range project.Services {
+ if name == options.Service {
+ project.Services[name] = target
+ break
}
- project.Services = types.Services{target}
}
+ return project, nil
+}
- for i, s := range project.Services {
- if s.Name == opts.Service {
- project.Services[i] = target
- break
+func (options runOptions) getEnvironment(resolve func(string) (string, bool)) (types.Mapping, error) {
+ environment := types.NewMappingWithEquals(options.environment).Resolve(resolve).ToMapping()
+ for _, file := range options.envFiles {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ vars, err := dotenv.ParseWithLookup(f, func(k string) (string, bool) {
+ value, ok := environment[k]
+ return value, ok
+ })
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range vars {
+ if _, ok := environment[k]; !ok {
+ environment[k] = v
+ }
}
}
- return nil
+ return environment, nil
}
-func runCommand(p *projectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
- opts := runOptions{
+func runCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command { //nolint:gocyclo
+ options := runOptions{
composeOptions: &composeOptions{
- projectOptions: p,
+ ProjectOptions: p,
},
+ capAdd: opts.NewListOpts(nil),
+ capDrop: opts.NewListOpts(nil),
+ }
+ createOpts := createOptions{}
+ buildOpts := buildOptions{
+ ProjectOptions: p,
}
+ // We removed the attribute from the options struct and use a dedicated var, to limit confusion and prevent anyone from using options.tty.
+ // The tty flag is here for convenience and lets users run "docker compose run -it" the same way they use the "docker run" command.
+ var ttyFlag bool
+
cmd := &cobra.Command{
- Use: "run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...] SERVICE [COMMAND] [ARGS...]",
- Short: "Run a one-off command on a service.",
+ Use: "run [OPTIONS] SERVICE [COMMAND] [ARGS...]",
+ Short: "Run a one-off command on a service",
Args: cobra.MinimumNArgs(1),
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
- opts.Service = args[0]
+ options.Service = args[0]
if len(args) > 1 {
- opts.Command = args[1:]
+ options.Command = args[1:]
}
- if len(opts.publish) > 0 && opts.servicePorts {
+ if len(options.publish) > 0 && options.servicePorts {
return fmt.Errorf("--service-ports and --publish are incompatible")
}
if cmd.Flags().Changed("entrypoint") {
- command, err := shellwords.Parse(opts.entrypoint)
+ command, err := shellwords.Parse(options.entrypoint)
+ if err != nil {
+ return err
+ }
+ options.entrypointCmd = command
+ }
+ if cmd.Flags().Changed("tty") {
+ if cmd.Flags().Changed("no-TTY") {
+ return fmt.Errorf("--tty and --no-TTY can't be used together")
+ } else {
+ options.noTty = !ttyFlag
+ }
+ } else if !cmd.Flags().Changed("no-TTY") && !cmd.Flags().Changed("interactive") && !dockerCli.In().IsTerminal() {
+ // while `docker run` requires explicit `-it` flags, Compose enables interactive mode and TTY by default
+ // but when compose is used from a script with stdin piped from another command, we just can't.
+ // Here, we detect we run "by default" (the user didn't pass explicit flags) and disable TTY allocation if
+ // we don't have an actual terminal to attach to for interactive mode
+ options.noTty = true
+ }
+
+ if options.quiet {
+ display.Mode = display.ModeQuiet
+ devnull, err := os.Open(os.DevNull)
if err != nil {
return err
}
- opts.entrypointCmd = command
+ os.Stdout = devnull
}
+ createOpts.pullChanged = cmd.Flags().Changed("pull")
return nil
}),
RunE: Adapt(func(ctx context.Context, args []string) error {
- project, err := p.toProject([]string{opts.Service}, cgo.WithResolvedPaths(true))
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
- ignore := project.Environment["COMPOSE_IGNORE_ORPHANS"]
- opts.ignoreOrphans = strings.ToLower(ignore) == "true"
- return runRun(ctx, backend, project, opts)
+
+ project, _, err := p.ToProject(ctx, dockerCli, backend, []string{options.Service}, composecli.WithoutEnvironmentResolution)
+ if err != nil {
+ return err
+ }
+
+ project, err = project.WithServicesEnvironmentResolved(true)
+ if err != nil {
+ return err
+ }
+
+ if createOpts.quietPull {
+ buildOpts.Progress = string(xprogress.QuietMode)
+ }
+
+ options.ignoreOrphans = utils.StringToBool(project.Environment[ComposeIgnoreOrphans])
+ return runRun(ctx, backend, project, options, createOpts, buildOpts, dockerCli)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := cmd.Flags()
- flags.BoolVarP(&opts.Detach, "detach", "d", false, "Run container in background and print container ID")
- flags.StringArrayVarP(&opts.environment, "env", "e", []string{}, "Set environment variables")
- flags.StringArrayVarP(&opts.labels, "label", "l", []string{}, "Add or override a label")
- flags.BoolVar(&opts.Remove, "rm", false, "Automatically remove the container when it exits")
- flags.BoolVarP(&opts.noTty, "no-TTY", "T", !dockerCli.Out().IsTerminal(), "Disable pseudo-TTY allocation (default: auto-detected).")
- flags.StringVar(&opts.name, "name", "", " Assign a name to the container")
- flags.StringVarP(&opts.user, "user", "u", "", "Run as specified username or uid")
- flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container")
- flags.StringVar(&opts.entrypoint, "entrypoint", "", "Override the entrypoint of the image")
- flags.BoolVar(&opts.noDeps, "no-deps", false, "Don't start linked services.")
- flags.StringArrayVarP(&opts.volumes, "volume", "v", []string{}, "Bind mount a volume.")
- flags.StringArrayVarP(&opts.publish, "publish", "p", []string{}, "Publish a container's port(s) to the host.")
- flags.BoolVar(&opts.useAliases, "use-aliases", false, "Use the service's network useAliases in the network(s) the container connects to.")
- flags.BoolVar(&opts.servicePorts, "service-ports", false, "Run command with the service's ports enabled and mapped to the host.")
- flags.BoolVar(&opts.quietPull, "quiet-pull", false, "Pull without printing progress information.")
-
- cmd.Flags().BoolVarP(&opts.interactive, "interactive", "i", true, "Keep STDIN open even if not attached.")
- cmd.Flags().BoolP("tty", "t", true, "Allocate a pseudo-TTY.")
+ flags.BoolVarP(&options.Detach, "detach", "d", false, "Run container in background and print container ID")
+ flags.StringArrayVarP(&options.environment, "env", "e", []string{}, "Set environment variables")
+ flags.StringArrayVar(&options.envFiles, "env-from-file", []string{}, "Set environment variables from file")
+ flags.StringArrayVarP(&options.labels, "label", "l", []string{}, "Add or override a label")
+ flags.BoolVar(&options.Remove, "rm", false, "Automatically remove the container when it exits")
+ flags.BoolVarP(&options.noTty, "no-TTY", "T", !dockerCli.Out().IsTerminal(), "Disable pseudo-TTY allocation (default: auto-detected)")
+ flags.StringVar(&options.name, "name", "", "Assign a name to the container")
+ flags.StringVarP(&options.user, "user", "u", "", "Run as specified username or uid")
+ flags.StringVarP(&options.workdir, "workdir", "w", "", "Working directory inside the container")
+ flags.StringVar(&options.entrypoint, "entrypoint", "", "Override the entrypoint of the image")
+ flags.Var(&options.capAdd, "cap-add", "Add Linux capabilities")
+ flags.Var(&options.capDrop, "cap-drop", "Drop Linux capabilities")
+ flags.BoolVar(&options.noDeps, "no-deps", false, "Don't start linked services")
+ flags.StringArrayVarP(&options.volumes, "volume", "v", []string{}, "Bind mount a volume")
+ flags.StringArrayVarP(&options.publish, "publish", "p", []string{}, "Publish a container's port(s) to the host")
+ flags.BoolVar(&options.useAliases, "use-aliases", false, "Use the service's network aliases in the network(s) the container connects to")
+ flags.BoolVarP(&options.servicePorts, "service-ports", "P", false, "Run command with all service's ports enabled and mapped to the host")
+ flags.StringVar(&createOpts.Pull, "pull", "policy", `Pull image before running ("always"|"missing"|"never")`)
+ flags.BoolVarP(&options.quiet, "quiet", "q", false, "Don't print anything to STDOUT")
+ flags.BoolVar(&buildOpts.quiet, "quiet-build", false, "Suppress progress output from the build process")
+ flags.BoolVar(&options.quietPull, "quiet-pull", false, "Pull without printing progress information")
+ flags.BoolVar(&createOpts.Build, "build", false, "Build image before starting container")
+ flags.BoolVar(&options.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file")
+
+ cmd.Flags().BoolVarP(&options.interactive, "interactive", "i", true, "Keep STDIN open even if not attached")
+ cmd.Flags().BoolVarP(&ttyFlag, "tty", "t", true, "Allocate a pseudo-TTY")
cmd.Flags().MarkHidden("tty") //nolint:errcheck
flags.SetNormalizeFunc(normalizeRunFlags)
@@ -181,21 +272,23 @@ func normalizeRunFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
return pflag.NormalizedName(name)
}
-func runRun(ctx context.Context, backend api.Service, project *types.Project, opts runOptions) error {
- err := opts.apply(project)
+func runRun(ctx context.Context, backend api.Compose, project *types.Project, options runOptions, createOpts createOptions, buildOpts buildOptions, dockerCli command.Cli) error {
+ project, err := options.apply(project)
if err != nil {
return err
}
- err = progress.Run(ctx, func(ctx context.Context) error {
- return startDependencies(ctx, backend, *project, opts.Service, opts.ignoreOrphans)
- })
+ err = createOpts.Apply(project)
if err != nil {
return err
}
+ if err := checksForRemoteStack(ctx, dockerCli, project, buildOpts, createOpts.AssumeYes, []string{}); err != nil {
+ return err
+ }
+
labels := types.Labels{}
- for _, s := range opts.labels {
+ for _, s := range options.labels {
parts := strings.SplitN(s, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("label must be set as KEY=VALUE")
@@ -203,30 +296,51 @@ func runRun(ctx context.Context, backend api.Service, project *types.Project, op
labels[parts[0]] = parts[1]
}
+ var buildForRun *api.BuildOptions
+ if !createOpts.noBuild {
+ bo, err := buildOpts.toAPIBuildOptions(nil)
+ if err != nil {
+ return err
+ }
+ buildForRun = &bo
+ }
+
+ environment, err := options.getEnvironment(project.Environment.Resolve)
+ if err != nil {
+ return err
+ }
+
// start container and attach to container streams
runOpts := api.RunOptions{
- Name: opts.name,
- Service: opts.Service,
- Command: opts.Command,
- Detach: opts.Detach,
- AutoRemove: opts.Remove,
- Tty: !opts.noTty,
- Interactive: opts.interactive,
- WorkingDir: opts.workdir,
- User: opts.user,
- Environment: opts.environment,
- Entrypoint: opts.entrypointCmd,
+ CreateOptions: api.CreateOptions{
+ Build: buildForRun,
+ RemoveOrphans: options.removeOrphans,
+ IgnoreOrphans: options.ignoreOrphans,
+ QuietPull: options.quietPull,
+ },
+ Name: options.name,
+ Service: options.Service,
+ Command: options.Command,
+ Detach: options.Detach,
+ AutoRemove: options.Remove,
+ Tty: !options.noTty,
+ Interactive: options.interactive,
+ WorkingDir: options.workdir,
+ User: options.user,
+ CapAdd: options.capAdd.GetSlice(),
+ CapDrop: options.capDrop.GetSlice(),
+ Environment: environment.Values(),
+ Entrypoint: options.entrypointCmd,
Labels: labels,
- UseNetworkAliases: opts.useAliases,
- NoDeps: opts.noDeps,
+ UseNetworkAliases: options.useAliases,
+ NoDeps: options.noDeps,
Index: 0,
- QuietPull: opts.quietPull,
}
- for i, service := range project.Services {
- if service.Name == opts.Service {
- service.StdinOpen = opts.interactive
- project.Services[i] = service
+ for name, service := range project.Services {
+ if name == options.Service {
+ service.StdinOpen = options.interactive
+ project.Services[name] = service
}
}
@@ -240,29 +354,3 @@ func runRun(ctx context.Context, backend api.Service, project *types.Project, op
}
return err
}
-
-func startDependencies(ctx context.Context, backend api.Service, project types.Project, requestedServiceName string, ignoreOrphans bool) error {
- dependencies := types.Services{}
- var requestedService types.ServiceConfig
- for _, service := range project.Services {
- if service.Name != requestedServiceName {
- dependencies = append(dependencies, service)
- } else {
- requestedService = service
- }
- }
-
- project.Services = dependencies
- project.DisabledServices = append(project.DisabledServices, requestedService)
- err := backend.Create(ctx, &project, api.CreateOptions{
- IgnoreOrphans: ignoreOrphans,
- })
- if err != nil {
- return err
- }
-
- if len(dependencies) > 0 {
- return backend.Start(ctx, project.Name, api.StartOptions{})
- }
- return nil
-}
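
The new getEnvironment above merges `-e` values with `--env-from-file` entries, with the command-line values taking precedence. A rough standalone sketch of that merge (the file contents and names are made up; dotenv.ParseWithLookup is used the same way as in the diff):

package main

import (
	"fmt"
	"strings"

	"github.com/compose-spec/compose-go/v2/dotenv"
)

func main() {
	// values already set with -e on the command line
	environment := map[string]string{"FOO": "from-cli"}

	// stand-in for a file passed via --env-from-file
	envFile := "FOO=from-file\nBAR=from-file\n"

	vars, err := dotenv.ParseWithLookup(strings.NewReader(envFile), func(k string) (string, bool) {
		v, ok := environment[k]
		return v, ok
	})
	if err != nil {
		panic(err)
	}
	// file entries only fill gaps; -e values always win
	for k, v := range vars {
		if _, ok := environment[k]; !ok {
			environment[k] = v
		}
	}
	fmt.Println(environment["FOO"], environment["BAR"]) // from-cli from-file
}
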
diff --git a/cmd/compose/scale.go b/cmd/compose/scale.go
new file mode 100644
index 00000000000..40baeae34d7
--- /dev/null
+++ b/cmd/compose/scale.go
@@ -0,0 +1,106 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "maps"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type scaleOptions struct {
+ *ProjectOptions
+ noDeps bool
+}
+
+func scaleCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := scaleOptions{
+ ProjectOptions: p,
+ }
+ scaleCmd := &cobra.Command{
+ Use: "scale [SERVICE=REPLICAS...]",
+ Short: "Scale services ",
+ Args: cobra.MinimumNArgs(1),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ serviceTuples, err := parseServicesReplicasArgs(args)
+ if err != nil {
+ return err
+ }
+ return runScale(ctx, dockerCli, backendOptions, opts, serviceTuples)
+ }),
+ ValidArgsFunction: completeScaleArgs(dockerCli, p),
+ }
+ flags := scaleCmd.Flags()
+ flags.BoolVar(&opts.noDeps, "no-deps", false, "Don't start linked services")
+
+ return scaleCmd
+}
+
+func runScale(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts scaleOptions, serviceReplicaTuples map[string]int) error {
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ services := slices.Sorted(maps.Keys(serviceReplicaTuples))
+ project, _, err := opts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+
+ if opts.noDeps {
+ if project, err = project.WithSelectedServices(services, types.IgnoreDependencies); err != nil {
+ return err
+ }
+ }
+
+ for key, value := range serviceReplicaTuples {
+ service, err := project.GetService(key)
+ if err != nil {
+ return err
+ }
+ service.SetScale(value)
+ project.Services[key] = service
+ }
+
+ return backend.Scale(ctx, project, api.ScaleOptions{Services: services})
+}
+
+func parseServicesReplicasArgs(args []string) (map[string]int, error) {
+ serviceReplicaTuples := map[string]int{}
+ for _, arg := range args {
+ key, val, ok := strings.Cut(arg, "=")
+ if !ok || key == "" || val == "" {
+ return nil, fmt.Errorf("invalid scale specifier: %s", arg)
+ }
+ intValue, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("invalid scale specifier: can't parse replica value as int: %v", arg)
+ }
+ serviceReplicaTuples[key] = intValue
+ }
+ return serviceReplicaTuples, nil
+}
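
To show how the SERVICE=REPLICAS arguments above behave on sample input, here is a trimmed standalone version of the parser (same approach as parseServicesReplicasArgs, minus the compose-specific wiring), including the error path for a non-numeric replica count:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseScaleArgs(args []string) (map[string]int, error) {
	replicas := map[string]int{}
	for _, arg := range args {
		key, val, ok := strings.Cut(arg, "=")
		if !ok || key == "" || val == "" {
			return nil, fmt.Errorf("invalid scale specifier: %s", arg)
		}
		n, err := strconv.Atoi(val)
		if err != nil {
			return nil, fmt.Errorf("invalid scale specifier: can't parse replica value as int: %v", arg)
		}
		replicas[key] = n
	}
	return replicas, nil
}

func main() {
	fmt.Println(parseScaleArgs([]string{"web=3", "worker=2"})) // map[web:3 worker:2] <nil>
	fmt.Println(parseScaleArgs([]string{"web=three"}))         // error: can't parse replica value
}
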
diff --git a/cmd/compose/start.go b/cmd/compose/start.go
index e9b67ae4403..3ef92dc973f 100644
--- a/cmd/compose/start.go
+++ b/cmd/compose/start.go
@@ -19,36 +19,44 @@ package compose
import (
"context"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
)
type startOptions struct {
- *projectOptions
+ *ProjectOptions
}
-func startCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func startCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := startOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
startCmd := &cobra.Command{
Use: "start [SERVICE...]",
Short: "Start services",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runStart(ctx, backend, opts, args)
+ return runStart(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
return startCmd
}
-func runStart(ctx context.Context, backend api.Service, opts startOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runStart(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts startOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
- return backend.Start(ctx, projectName, api.StartOptions{
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Start(ctx, name, api.StartOptions{
AttachTo: services,
+ Project: project,
+ Services: services,
})
}
diff --git a/cmd/compose/stats.go b/cmd/compose/stats.go
new file mode 100644
index 00000000000..cef2daf275d
--- /dev/null
+++ b/cmd/compose/stats.go
@@ -0,0 +1,84 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/command/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/spf13/cobra"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+type statsOptions struct {
+ ProjectOptions *ProjectOptions
+ all bool
+ format string
+ noStream bool
+ noTrunc bool
+}
+
+func statsCommand(p *ProjectOptions, dockerCli command.Cli) *cobra.Command {
+ opts := statsOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "stats [OPTIONS] [SERVICE]",
+ Short: "Display a live stream of container(s) resource usage statistics",
+ Args: cobra.MaximumNArgs(1),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runStats(ctx, dockerCli, opts, args)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+ flags := cmd.Flags()
+ flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
+ flags.StringVar(&opts.format, "format", "", `Format output using a custom template:
+'table': Print output in table format with column headers (default)
+'table TEMPLATE': Print output in table format using the given Go template
+'json': Print in JSON format
+'TEMPLATE': Print output using the given Go template.
+Refer to https://docs.docker.com/engine/cli/formatting/ for more information about formatting output with templates`)
+ flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result")
+ flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output")
+ return cmd
+}
+
+func runStats(ctx context.Context, dockerCli command.Cli, opts statsOptions, service []string) error {
+ name, err := opts.ProjectOptions.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+ filter := []filters.KeyValuePair{
+ filters.Arg("label", fmt.Sprintf("%s=%s", api.ProjectLabel, name)),
+ }
+ if len(service) > 0 {
+ filter = append(filter, filters.Arg("label", fmt.Sprintf("%s=%s", api.ServiceLabel, service[0])))
+ }
+ args := filters.NewArgs(filter...)
+ return container.RunStats(ctx, dockerCli, &container.StatsOptions{
+ All: opts.all,
+ NoStream: opts.noStream,
+ NoTrunc: opts.noTrunc,
+ Format: opts.format,
+ Filters: &args,
+ })
+}
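
The stats command above scopes `docker stats` to the compose project purely through label filters. A short standalone sketch of the filter it builds (project and service names are made up for the example):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// com.docker.compose.project / com.docker.compose.service are the labels
	// compose puts on every container it creates
	args := filters.NewArgs(
		filters.Arg("label", "com.docker.compose.project=myproj"),
		filters.Arg("label", "com.docker.compose.service=web"),
	)
	// this is the filter handed to the docker CLI stats helper
	fmt.Println(args.Get("label"))
}
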
diff --git a/cmd/compose/stop.go b/cmd/compose/stop.go
index 39861cd68c3..67f299e0485 100644
--- a/cmd/compose/stop.go
+++ b/cmd/compose/stop.go
@@ -20,40 +20,42 @@ import (
"context"
"time"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type stopOptions struct {
- *projectOptions
+ *ProjectOptions
timeChanged bool
timeout int
}
-func stopCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func stopCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := stopOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
cmd := &cobra.Command{
- Use: "stop [SERVICE...]",
+ Use: "stop [OPTIONS] [SERVICE...]",
Short: "Stop services",
PreRun: func(cmd *cobra.Command, args []string) {
opts.timeChanged = cmd.Flags().Changed("timeout")
},
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runStop(ctx, backend, opts, args)
+ return runStop(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := cmd.Flags()
- flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
+ flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
return cmd
}
-func runStop(ctx context.Context, backend api.Service, opts stopOptions, services []string) error {
- projectName, err := opts.toProjectName()
+func runStop(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts stopOptions, services []string) error {
+ project, name, err := opts.projectOrName(ctx, dockerCli, services...)
if err != nil {
return err
}
@@ -63,8 +65,13 @@ func runStop(ctx context.Context, backend api.Service, opts stopOptions, service
timeoutValue := time.Duration(opts.timeout) * time.Second
timeout = &timeoutValue
}
- return backend.Stop(ctx, projectName, api.StopOptions{
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ return backend.Stop(ctx, name, api.StopOptions{
Timeout: timeout,
Services: services,
+ Project: project,
})
}
diff --git a/cmd/compose/top.go b/cmd/compose/top.go
index 60f3a1fac8a..b1b9ae42a95 100644
--- a/cmd/compose/top.go
+++ b/cmd/compose/top.go
@@ -20,37 +20,48 @@ import (
"context"
"fmt"
"io"
- "os"
"sort"
"strings"
"text/tabwriter"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/compose"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
type topOptions struct {
- *projectOptions
+ *ProjectOptions
}
-func topCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func topCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
opts := topOptions{
- projectOptions: p,
+ ProjectOptions: p,
}
topCmd := &cobra.Command{
Use: "top [SERVICES...]",
Short: "Display the running processes",
RunE: Adapt(func(ctx context.Context, args []string) error {
- return runTop(ctx, backend, opts, args)
+ return runTop(ctx, dockerCli, backendOptions, opts, args)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
return topCmd
}
-func runTop(ctx context.Context, backend api.Service, opts topOptions, services []string) error {
- projectName, err := opts.toProjectName()
+type (
+ topHeader map[string]int // maps a proc title to its output index
+ topEntries map[string]string
+)
+
+func runTop(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts topOptions, services []string) error {
+ projectName, err := opts.toProjectName(ctx, dockerCli)
+ if err != nil {
+ return err
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
if err != nil {
return err
}
@@ -63,30 +74,76 @@ func runTop(ctx context.Context, backend api.Service, opts topOptions, services
return containers[i].Name < containers[j].Name
})
+ header, entries := collectTop(containers)
+ return topPrint(dockerCli.Out(), header, entries)
+}
+
+func collectTop(containers []api.ContainerProcSummary) (topHeader, []topEntries) {
+ // map column name to its header (should keep working if backend.Top returns
+ // varying columns for different containers)
+ header := topHeader{"SERVICE": 0, "#": 1}
+
+ // assume one process per container and grow if needed
+ entries := make([]topEntries, 0, len(containers))
+
for _, container := range containers {
- fmt.Printf("%s\n", container.Name)
- err := psPrinter(os.Stdout, func(w io.Writer) {
- for _, proc := range container.Processes {
- info := []interface{}{}
- for _, p := range proc {
- info = append(info, p)
+ for _, proc := range container.Processes {
+ entry := topEntries{
+ "SERVICE": container.Service,
+ "#": container.Replica,
+ }
+ for i, title := range container.Titles {
+ if _, exists := header[title]; !exists {
+ header[title] = len(header)
}
- _, _ = fmt.Fprintf(w, strings.Repeat("%s\t", len(info))+"\n", info...)
+ entry[title] = proc[i]
+ }
+ entries = append(entries, entry)
+ }
+ }
+ // ensure CMD is the right-most column
+ if pos, ok := header["CMD"]; ok {
+ maxPos := pos
+ for h, i := range header {
+ if i > maxPos {
+ maxPos = i
+ }
+ if i > pos {
+ header[h] = i - 1
}
- fmt.Fprintln(w)
- },
- container.Titles...)
- if err != nil {
- return err
}
+ header["CMD"] = maxPos
}
- return nil
+
+ return header, entries
}
-func psPrinter(out io.Writer, printer func(writer io.Writer), headers ...string) error {
- w := tabwriter.NewWriter(out, 5, 1, 3, ' ', 0)
- _, _ = fmt.Fprintln(w, strings.Join(headers, "\t"))
- printer(w)
+func topPrint(out io.Writer, headers topHeader, rows []topEntries) error {
+ if len(rows) == 0 {
+ return nil
+ }
+
+ w := tabwriter.NewWriter(out, 4, 1, 2, ' ', 0)
+
+ // write headers in the order we've encountered them
+ h := make([]string, len(headers))
+ for title, index := range headers {
+ h[index] = title
+ }
+ _, _ = fmt.Fprintln(w, strings.Join(h, "\t"))
+
+ for _, row := range rows {
+ // write proc data in header order
+ r := make([]string, len(headers))
+ for title, index := range headers {
+ if v, ok := row[title]; ok {
+ r[index] = v
+ } else {
+ r[index] = "-"
+ }
+ }
+ _, _ = fmt.Fprintln(w, strings.Join(r, "\t"))
+ }
return w.Flush()
}
diff --git a/cmd/compose/top_test.go b/cmd/compose/top_test.go
new file mode 100644
index 00000000000..369dfa19cef
--- /dev/null
+++ b/cmd/compose/top_test.go
@@ -0,0 +1,329 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var topTestCases = []struct {
+ name string
+ titles []string
+ procs [][]string
+
+ header topHeader
+ entries []topEntries
+ output string
+}{
+ {
+ name: "noprocs",
+ titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+ procs: [][]string{},
+ header: topHeader{"SERVICE": 0, "#": 1},
+ entries: []topEntries{},
+ output: "",
+ },
+ {
+ name: "simple",
+ titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+ procs: [][]string{{"root", "1", "1", "0", "12:00", "?", "00:00:01", "/entrypoint"}},
+ header: topHeader{
+ "SERVICE": 0,
+ "#": 1,
+ "UID": 2,
+ "PID": 3,
+ "PPID": 4,
+ "C": 5,
+ "STIME": 6,
+ "TTY": 7,
+ "TIME": 8,
+ "CMD": 9,
+ },
+ entries: []topEntries{
+ {
+ "SERVICE": "simple",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:01",
+ "CMD": "/entrypoint",
+ },
+ },
+ output: trim(`
+ SERVICE # UID PID PPID C STIME TTY TIME CMD
+ simple 1 root 1 1 0 12:00 ? 00:00:01 /entrypoint
+ `),
+ },
+ {
+ name: "noppid",
+ titles: []string{"UID", "PID", "C", "STIME", "TTY", "TIME", "CMD"},
+ procs: [][]string{{"root", "1", "0", "12:00", "?", "00:00:02", "/entrypoint"}},
+ header: topHeader{
+ "SERVICE": 0,
+ "#": 1,
+ "UID": 2,
+ "PID": 3,
+ "C": 4,
+ "STIME": 5,
+ "TTY": 6,
+ "TIME": 7,
+ "CMD": 8,
+ },
+ entries: []topEntries{
+ {
+ "SERVICE": "noppid",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:02",
+ "CMD": "/entrypoint",
+ },
+ },
+ output: trim(`
+ SERVICE # UID PID C STIME TTY TIME CMD
+ noppid 1 root 1 0 12:00 ? 00:00:02 /entrypoint
+ `),
+ },
+ {
+ name: "extra-hdr",
+ titles: []string{"UID", "GID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+ procs: [][]string{{"root", "1", "1", "1", "0", "12:00", "?", "00:00:03", "/entrypoint"}},
+ header: topHeader{
+ "SERVICE": 0,
+ "#": 1,
+ "UID": 2,
+ "GID": 3,
+ "PID": 4,
+ "PPID": 5,
+ "C": 6,
+ "STIME": 7,
+ "TTY": 8,
+ "TIME": 9,
+ "CMD": 10,
+ },
+ entries: []topEntries{
+ {
+ "SERVICE": "extra-hdr",
+ "#": "1",
+ "UID": "root",
+ "GID": "1",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:03",
+ "CMD": "/entrypoint",
+ },
+ },
+ output: trim(`
+ SERVICE # UID GID PID PPID C STIME TTY TIME CMD
+ extra-hdr 1 root 1 1 1 0 12:00 ? 00:00:03 /entrypoint
+ `),
+ },
+ {
+ name: "multiple",
+ titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+ procs: [][]string{
+ {"root", "1", "1", "0", "12:00", "?", "00:00:04", "/entrypoint"},
+ {"root", "123", "1", "0", "12:00", "?", "00:00:42", "sleep infinity"},
+ },
+ header: topHeader{
+ "SERVICE": 0,
+ "#": 1,
+ "UID": 2,
+ "PID": 3,
+ "PPID": 4,
+ "C": 5,
+ "STIME": 6,
+ "TTY": 7,
+ "TIME": 8,
+ "CMD": 9,
+ },
+ entries: []topEntries{
+ {
+ "SERVICE": "multiple",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:04",
+ "CMD": "/entrypoint",
+ },
+ {
+ "SERVICE": "multiple",
+ "#": "1",
+ "UID": "root",
+ "PID": "123",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:42",
+ "CMD": "sleep infinity",
+ },
+ },
+ output: trim(`
+ SERVICE # UID PID PPID C STIME TTY TIME CMD
+ multiple 1 root 1 1 0 12:00 ? 00:00:04 /entrypoint
+ multiple 1 root 123 1 0 12:00 ? 00:00:42 sleep infinity
+ `),
+ },
+}
+
+// TestRunTopCore only tests the core functionality of runTop: formatting
+// and printing of the output of (api.Compose).Top().
+func TestRunTopCore(t *testing.T) {
+ t.Parallel()
+
+ all := []api.ContainerProcSummary{}
+
+ for _, tc := range topTestCases {
+ summary := api.ContainerProcSummary{
+ Name: "not used",
+ Titles: tc.titles,
+ Processes: tc.procs,
+ Service: tc.name,
+ Replica: "1",
+ }
+ all = append(all, summary)
+
+ t.Run(tc.name, func(t *testing.T) {
+ header, entries := collectTop([]api.ContainerProcSummary{summary})
+ assert.Equal(t, tc.header, header)
+ assert.Equal(t, tc.entries, entries)
+
+ var buf bytes.Buffer
+ err := topPrint(&buf, header, entries)
+
+ require.NoError(t, err)
+ assert.Equal(t, tc.output, buf.String())
+ })
+ }
+
+ t.Run("all", func(t *testing.T) {
+ header, entries := collectTop(all)
+ assert.Equal(t, topHeader{
+ "SERVICE": 0,
+ "#": 1,
+ "UID": 2,
+ "PID": 3,
+ "PPID": 4,
+ "C": 5,
+ "STIME": 6,
+ "TTY": 7,
+ "TIME": 8,
+ "GID": 9,
+ "CMD": 10,
+ }, header)
+ assert.Equal(t, []topEntries{
+ {
+ "SERVICE": "simple",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:01",
+ "CMD": "/entrypoint",
+ }, {
+ "SERVICE": "noppid",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:02",
+ "CMD": "/entrypoint",
+ }, {
+ "SERVICE": "extra-hdr",
+ "#": "1",
+ "UID": "root",
+ "GID": "1",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:03",
+ "CMD": "/entrypoint",
+ }, {
+ "SERVICE": "multiple",
+ "#": "1",
+ "UID": "root",
+ "PID": "1",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:04",
+ "CMD": "/entrypoint",
+ }, {
+ "SERVICE": "multiple",
+ "#": "1",
+ "UID": "root",
+ "PID": "123",
+ "PPID": "1",
+ "C": "0",
+ "STIME": "12:00",
+ "TTY": "?",
+ "TIME": "00:00:42",
+ "CMD": "sleep infinity",
+ },
+ }, entries)
+
+ var buf bytes.Buffer
+ err := topPrint(&buf, header, entries)
+ require.NoError(t, err)
+ assert.Equal(t, trim(`
+ SERVICE # UID PID PPID C STIME TTY TIME GID CMD
+ simple 1 root 1 1 0 12:00 ? 00:00:01 - /entrypoint
+ noppid 1 root 1 - 0 12:00 ? 00:00:02 - /entrypoint
+ extra-hdr 1 root 1 1 0 12:00 ? 00:00:03 1 /entrypoint
+ multiple 1 root 1 1 0 12:00 ? 00:00:04 - /entrypoint
+ multiple 1 root 123 1 0 12:00 ? 00:00:42 - sleep infinity
+ `), buf.String())
+ })
+}
+
+func trim(s string) string {
+ var out bytes.Buffer
+ for _, line := range strings.Split(strings.TrimSpace(s), "\n") {
+ out.WriteString(strings.TrimSpace(line))
+ out.WriteRune('\n')
+ }
+ return out.String()
+}
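
Context for the fixtures above: the cases assert that column titles coming from heterogeneous `ps` outputs are merged into a single header, with SERVICE/# first, late-appearing columns (GID) slotted in before CMD so CMD stays last, and missing values rendered as "-". A minimal sketch of a merge with that shape, for illustration only (not the collectTop implementation added elsewhere in this change):

    // mergeTitles is a sketch that mirrors the merge behaviour asserted above.
    func mergeTitles(header map[string]int, titles []string) map[string]int {
    	if header == nil {
    		header = map[string]int{"SERVICE": 0, "#": 1}
    	}
    	for _, title := range titles {
    		if _, seen := header[title]; seen || title == "CMD" {
    			continue
    		}
    		if cmd, ok := header["CMD"]; ok {
    			header[title] = cmd    // new columns take CMD's slot...
    			header["CMD"] = cmd + 1 // ...so CMD remains the last column
    		} else {
    			header[title] = len(header)
    		}
    	}
    	if _, ok := header["CMD"]; !ok {
    		header["CMD"] = len(header)
    	}
    	return header
    }
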
diff --git a/cmd/compose/up.go b/cmd/compose/up.go
index 9e7e96c6f6e..7b9807c993d 100644
--- a/cmd/compose/up.go
+++ b/cmd/compose/up.go
@@ -18,128 +18,182 @@ package compose
import (
"context"
+ "errors"
"fmt"
"os"
- "strconv"
"strings"
+ "time"
- "github.com/docker/compose/v2/cmd/formatter"
-
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/display"
+ "github.com/docker/compose/v5/pkg/compose"
+ xprogress "github.com/moby/buildkit/util/progress/progressui"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
// composeOptions hold options common to `up` and `run` to run compose project
type composeOptions struct {
- *projectOptions
+ *ProjectOptions
}
type upOptions struct {
*composeOptions
- Detach bool
- noStart bool
- noDeps bool
- cascadeStop bool
- exitCodeFrom string
- scale []string
- noColor bool
- noPrefix bool
- attachDependencies bool
- attach []string
- wait bool
+ Detach bool
+ noStart bool
+ noDeps bool
+ cascadeStop bool
+ cascadeFail bool
+ exitCodeFrom string
+ noColor bool
+ noPrefix bool
+ attachDependencies bool
+ attach []string
+ noAttach []string
+ timestamp bool
+ wait bool
+ waitTimeout int
+ watch bool
+ navigationMenu bool
+ navigationMenuChanged bool
}
-func (opts upOptions) apply(project *types.Project, services []string) error {
+func (opts upOptions) apply(project *types.Project, services []string) (*types.Project, error) {
if opts.noDeps {
- enabled, err := project.GetServices(services...)
+ var err error
+ project, err = project.WithSelectedServices(services, types.IgnoreDependencies)
if err != nil {
- return err
- }
- for _, s := range project.Services {
- if !utils.StringContains(services, s.Name) {
- project.DisabledServices = append(project.DisabledServices, s)
- }
+ return nil, err
}
- project.Services = enabled
}
if opts.exitCodeFrom != "" {
_, err := project.GetService(opts.exitCodeFrom)
if err != nil {
- return err
+ return nil, err
}
}
- for _, scale := range opts.scale {
- split := strings.Split(scale, "=")
- if len(split) != 2 {
- return fmt.Errorf("invalid --scale option %q. Should be SERVICE=NUM", scale)
- }
- name := split[0]
- replicas, err := strconv.Atoi(split[1])
- if err != nil {
- return err
- }
- err = setServiceScale(project, name, uint64(replicas))
- if err != nil {
- return err
+ return project, nil
+}
+
+func (opts *upOptions) validateNavigationMenu(dockerCli command.Cli) {
+ if !dockerCli.Out().IsTerminal() {
+ opts.navigationMenu = false
+ return
+ }
+ // If --menu flag was not set
+ if !opts.navigationMenuChanged {
+ if envVar, ok := os.LookupEnv(ComposeMenu); ok {
+ opts.navigationMenu = utils.StringToBool(envVar)
+ return
}
+ // ...and the COMPOSE_MENU env var is not defined, default to true
+ opts.navigationMenu = true
}
+}
- return nil
+func (opts upOptions) OnExit() api.Cascade {
+ switch {
+ case opts.cascadeStop:
+ return api.CascadeStop
+ case opts.cascadeFail:
+ return api.CascadeFail
+ default:
+ return api.CascadeIgnore
+ }
}
-func upCommand(p *projectOptions, backend api.Service) *cobra.Command {
+func upCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
up := upOptions{}
create := createOptions{}
+ build := buildOptions{ProjectOptions: p}
upCmd := &cobra.Command{
- Use: "up [SERVICE...]",
+ Use: "up [OPTIONS] [SERVICE...]",
Short: "Create and start containers",
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
+ create.pullChanged = cmd.Flags().Changed("pull")
create.timeChanged = cmd.Flags().Changed("timeout")
+ up.navigationMenuChanged = cmd.Flags().Changed("menu")
+ if !cmd.Flags().Changed("remove-orphans") {
+ create.removeOrphans = utils.StringToBool(os.Getenv(ComposeRemoveOrphans))
+ }
return validateFlags(&up, &create)
}),
- RunE: p.WithServices(func(ctx context.Context, project *types.Project, services []string) error {
- create.ignoreOrphans = utils.StringToBool(project.Environment["COMPOSE_IGNORE_ORPHANS"])
+ RunE: p.WithServices(dockerCli, func(ctx context.Context, project *types.Project, services []string) error {
+ create.ignoreOrphans = utils.StringToBool(project.Environment[ComposeIgnoreOrphans])
if create.ignoreOrphans && create.removeOrphans {
- return fmt.Errorf("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined")
+ return fmt.Errorf("cannot combine %s and --remove-orphans", ComposeIgnoreOrphans)
+ }
+ if len(up.attach) != 0 && up.attachDependencies {
+ return errors.New("cannot combine --attach and --attach-dependencies")
+ }
+
+ up.validateNavigationMenu(dockerCli)
+
+ if !p.All && len(project.Services) == 0 {
+ return fmt.Errorf("no service selected")
}
- return runUp(ctx, backend, create, up, project, services)
+
+ return runUp(ctx, dockerCli, backendOptions, create, up, build, project, services)
}),
- ValidArgsFunction: serviceCompletion(p),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
}
flags := upCmd.Flags()
flags.BoolVarP(&up.Detach, "detach", "d", false, "Detached mode: Run containers in the background")
- flags.BoolVar(&create.Build, "build", false, "Build images before starting containers.")
- flags.BoolVar(&create.noBuild, "no-build", false, "Don't build an image, even if it's missing.")
- flags.BoolVar(&create.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file.")
- flags.StringArrayVar(&up.scale, "scale", []string{}, "Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
- flags.BoolVar(&up.noColor, "no-color", false, "Produce monochrome output.")
- flags.BoolVar(&up.noPrefix, "no-log-prefix", false, "Don't print prefix in logs.")
- flags.BoolVar(&create.forceRecreate, "force-recreate", false, "Recreate containers even if their configuration and image haven't changed.")
+ flags.BoolVar(&create.Build, "build", false, "Build images before starting containers")
+ flags.BoolVar(&create.noBuild, "no-build", false, "Don't build an image, even if the pull policy requires a build")
+ flags.StringVar(&create.Pull, "pull", "policy", `Pull image before running ("always"|"missing"|"never")`)
+ flags.BoolVar(&create.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file")
+ flags.StringArrayVar(&create.scale, "scale", []string{}, "Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
+ flags.BoolVar(&up.noColor, "no-color", false, "Produce monochrome output")
+ flags.BoolVar(&up.noPrefix, "no-log-prefix", false, "Don't print prefix in logs")
+ flags.BoolVar(&create.forceRecreate, "force-recreate", false, "Recreate containers even if their configuration and image haven't changed")
flags.BoolVar(&create.noRecreate, "no-recreate", false, "If containers already exist, don't recreate them. Incompatible with --force-recreate.")
- flags.BoolVar(&up.noStart, "no-start", false, "Don't start the services after creating them.")
+ flags.BoolVar(&up.noStart, "no-start", false, "Don't start the services after creating them")
flags.BoolVar(&up.cascadeStop, "abort-on-container-exit", false, "Stops all containers if any container was stopped. Incompatible with -d")
+ flags.BoolVar(&up.cascadeFail, "abort-on-container-failure", false, "Stops all containers if any container exited with failure. Incompatible with -d")
flags.StringVar(&up.exitCodeFrom, "exit-code-from", "", "Return the exit code of the selected service container. Implies --abort-on-container-exit")
- flags.IntVarP(&create.timeout, "timeout", "t", 10, "Use this timeout in seconds for container shutdown when attached or when containers are already running.")
- flags.BoolVar(&up.noDeps, "no-deps", false, "Don't start linked services.")
+ flags.IntVarP(&create.timeout, "timeout", "t", 0, "Use this timeout in seconds for container shutdown when attached or when containers are already running")
+ flags.BoolVar(&up.timestamp, "timestamps", false, "Show timestamps")
+ flags.BoolVar(&up.noDeps, "no-deps", false, "Don't start linked services")
flags.BoolVar(&create.recreateDeps, "always-recreate-deps", false, "Recreate dependent containers. Incompatible with --no-recreate.")
- flags.BoolVarP(&create.noInherit, "renew-anon-volumes", "V", false, "Recreate anonymous volumes instead of retrieving data from the previous containers.")
- flags.BoolVar(&up.attachDependencies, "attach-dependencies", false, "Attach to dependent containers.")
- flags.BoolVar(&create.quietPull, "quiet-pull", false, "Pull without printing progress information.")
- flags.StringArrayVar(&up.attach, "attach", []string{}, "Attach to service output.")
+ flags.BoolVarP(&create.noInherit, "renew-anon-volumes", "V", false, "Recreate anonymous volumes instead of retrieving data from the previous containers")
+ flags.BoolVar(&create.quietPull, "quiet-pull", false, "Pull without printing progress information")
+ flags.BoolVar(&build.quiet, "quiet-build", false, "Suppress the build output")
+ flags.StringArrayVar(&up.attach, "attach", []string{}, "Restrict attaching to the specified services. Incompatible with --attach-dependencies.")
+ flags.StringArrayVar(&up.noAttach, "no-attach", []string{}, "Do not attach (stream logs) to the specified services")
+ flags.BoolVar(&up.attachDependencies, "attach-dependencies", false, "Automatically attach to log output of dependent services")
flags.BoolVar(&up.wait, "wait", false, "Wait for services to be running|healthy. Implies detached mode.")
-
+ flags.IntVar(&up.waitTimeout, "wait-timeout", 0, "Maximum duration in seconds to wait for the project to be running|healthy")
+ flags.BoolVarP(&up.watch, "watch", "w", false, "Watch source code and rebuild/refresh containers when files are updated.")
+ flags.BoolVar(&up.navigationMenu, "menu", false, "Enable interactive shortcuts when running attached. Incompatible with --detach. Can also be enabled/disabled by setting the COMPOSE_MENU environment variable.")
+ flags.BoolVarP(&create.AssumeYes, "yes", "y", false, `Assume "yes" as answer to all prompts and run non-interactively`)
+ flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ // assumeYes was introduced by mistake as `--y`
+ if name == "y" {
+ logrus.Warn("--y is deprecated, please use --yes instead")
+ name = "yes"
+ }
+ return pflag.NormalizedName(name)
+ })
return upCmd
}
+//nolint:gocyclo
func validateFlags(up *upOptions, create *createOptions) error {
- if up.exitCodeFrom != "" {
+ if up.exitCodeFrom != "" && !up.cascadeFail {
up.cascadeStop = true
}
+ if up.cascadeStop && up.cascadeFail {
+ return fmt.Errorf("--abort-on-container-failure cannot be combined with --abort-on-container-exit")
+ }
if up.wait {
if up.attachDependencies || up.cascadeStop || len(up.attach) > 0 {
return fmt.Errorf("--wait cannot be combined with --abort-on-container-exit, --attach or --attach-dependencies")
@@ -149,8 +203,15 @@ func validateFlags(up *upOptions, create *createOptions) error {
if create.Build && create.noBuild {
return fmt.Errorf("--build and --no-build are incompatible")
}
- if up.Detach && (up.attachDependencies || up.cascadeStop || len(up.attach) > 0) {
- return fmt.Errorf("--detach cannot be combined with --abort-on-container-exit, --attach or --attach-dependencies")
+ if up.Detach && (up.attachDependencies || up.cascadeStop || up.cascadeFail || len(up.attach) > 0 || up.watch) {
+ if up.wait {
+ return fmt.Errorf("--wait cannot be combined with --abort-on-container-exit, --abort-on-container-failure, --attach, --attach-dependencies or --watch")
+ } else {
+ return fmt.Errorf("--detach cannot be combined with --abort-on-container-exit, --abort-on-container-failure, --attach, --attach-dependencies or --watch")
+ }
+ }
+ if create.noInherit && create.noRecreate {
+ return fmt.Errorf("--no-recreate and --renew-anon-volumes are incompatible")
}
if create.forceRecreate && create.noRecreate {
return fmt.Errorf("--force-recreate and --no-recreate are incompatible")
@@ -158,35 +219,57 @@ func validateFlags(up *upOptions, create *createOptions) error {
if create.recreateDeps && create.noRecreate {
return fmt.Errorf("--always-recreate-deps and --no-recreate are incompatible")
}
+ if create.noBuild && up.watch {
+ return fmt.Errorf("--no-build and --watch are incompatible")
+ }
return nil
}
-func runUp(ctx context.Context, backend api.Service, createOptions createOptions, upOptions upOptions, project *types.Project, services []string) error {
- if len(project.Services) == 0 {
- return fmt.Errorf("no service selected")
+//nolint:gocyclo
+func runUp(
+ ctx context.Context,
+ dockerCli command.Cli,
+ backendOptions *BackendOptions,
+ createOptions createOptions,
+ upOptions upOptions,
+ buildOptions buildOptions,
+ project *types.Project,
+ services []string,
+) error {
+ if err := checksForRemoteStack(ctx, dockerCli, project, buildOptions, createOptions.AssumeYes, []string{}); err != nil {
+ return err
}
- createOptions.Apply(project)
-
- err := upOptions.apply(project, services)
+ err := createOptions.Apply(project)
if err != nil {
return err
}
- var consumer api.LogConsumer
- if !upOptions.Detach {
- consumer = formatter.NewLogConsumer(ctx, os.Stdout, !upOptions.noColor, !upOptions.noPrefix)
+ project, err = upOptions.apply(project, services)
+ if err != nil {
+ return err
}
- attachTo := services
- if len(upOptions.attach) > 0 {
- attachTo = upOptions.attach
- }
- if upOptions.attachDependencies {
- attachTo = project.ServiceNames()
+ var build *api.BuildOptions
+ if !createOptions.noBuild {
+ if createOptions.quietPull {
+ buildOptions.Progress = string(xprogress.QuietMode)
+ }
+ // BuildOptions here is nested inside CreateOptions, so
+ // no service list is passed; it will implicitly pick all
+ // services being created, which includes any explicitly
+ // specified via the "services" arg here as well as their deps
+ bo, err := buildOptions.toAPIBuildOptions(nil)
+ if err != nil {
+ return err
+ }
+ bo.Services = project.ServiceNames()
+ bo.Deps = !upOptions.noDeps
+ build = &bo
}
create := api.CreateOptions{
+ Build: build,
Services: services,
RemoveOrphans: createOptions.removeOrphans,
IgnoreOrphans: createOptions.ignoreOrphans,
@@ -197,37 +280,78 @@ func runUp(ctx context.Context, backend api.Service, createOptions createOptions
QuietPull: createOptions.quietPull,
}
+ if createOptions.AssumeYes {
+ backendOptions.Options = append(backendOptions.Options, compose.WithPrompt(compose.AlwaysOkPrompt()))
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
if upOptions.noStart {
return backend.Create(ctx, project, create)
}
+ var consumer api.LogConsumer
+ var attach []string
+ if !upOptions.Detach {
+ consumer = formatter.NewLogConsumer(ctx, dockerCli.Out(), dockerCli.Err(), !upOptions.noColor, !upOptions.noPrefix, upOptions.timestamp)
+
+ var attachSet utils.Set[string]
+ if len(upOptions.attach) != 0 {
+ // services are passed explicitly with --attach, verify they're valid and then use them as-is
+ attachSet = utils.NewSet(upOptions.attach...)
+ unexpectedSvcs := attachSet.Diff(utils.NewSet(project.ServiceNames()...))
+ if len(unexpectedSvcs) != 0 {
+ return fmt.Errorf("cannot attach to services not included in up: %s", strings.Join(unexpectedSvcs.Elements(), ", "))
+ }
+ } else {
+ // mark services being launched (and potentially their deps) for attach
+ // if they didn't opt-out via Compose YAML
+ attachSet = utils.NewSet[string]()
+ var dependencyOpt types.DependencyOption = types.IgnoreDependencies
+ if upOptions.attachDependencies {
+ dependencyOpt = types.IncludeDependencies
+ }
+ if err := project.ForEachService(services, func(serviceName string, s *types.ServiceConfig) error {
+ if s.Attach == nil || *s.Attach {
+ attachSet.Add(serviceName)
+ }
+ return nil
+ }, dependencyOpt); err != nil {
+ return err
+ }
+ }
+ // filter out any services that have been explicitly marked for ignore with `--no-attach`
+ attachSet.RemoveAll(upOptions.noAttach...)
+ attach = attachSet.Elements()
+ }
+
+ timeout := time.Duration(upOptions.waitTimeout) * time.Second
return backend.Up(ctx, project, api.UpOptions{
Create: create,
Start: api.StartOptions{
- Project: project,
- Attach: consumer,
- AttachTo: attachTo,
- ExitCodeFrom: upOptions.exitCodeFrom,
- CascadeStop: upOptions.cascadeStop,
- Wait: upOptions.wait,
+ Project: project,
+ Attach: consumer,
+ AttachTo: attach,
+ ExitCodeFrom: upOptions.exitCodeFrom,
+ OnExit: upOptions.OnExit(),
+ Wait: upOptions.wait,
+ WaitTimeout: timeout,
+ Watch: upOptions.watch,
+ Services: services,
+ NavigationMenu: upOptions.navigationMenu && display.Mode != "plain" && dockerCli.In().IsTerminal(),
},
})
}
-func setServiceScale(project *types.Project, name string, replicas uint64) error {
- for i, s := range project.Services {
- if s.Name == name {
- service, err := project.GetService(name)
- if err != nil {
- return err
- }
- if service.Deploy == nil {
- service.Deploy = &types.DeployConfig{}
- }
- service.Deploy.Replicas = &replicas
- project.Services[i] = service
- return nil
- }
+func setServiceScale(project *types.Project, name string, replicas int) error {
+ service, err := project.GetService(name)
+ if err != nil {
+ return err
}
- return fmt.Errorf("unknown service %q", name)
+ service.SetScale(replicas)
+ project.Services[name] = service
+ return nil
}
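
A quick illustration of how the validation above plays out in practice. This is a hypothetical same-package snippet (it assumes the package's fmt import), not part of the change:

    // Sketch: --exit-code-from implies --abort-on-container-exit, and --detach
    // rejects attach/watch-style flags, as enforced by validateFlags above.
    func validateFlagsSketch() {
    	up := upOptions{exitCodeFrom: "web"}
    	_ = validateFlags(&up, &createOptions{})
    	fmt.Println(up.cascadeStop) // true

    	bad := upOptions{Detach: true, watch: true}
    	err := validateFlags(&bad, &createOptions{})
    	fmt.Println(err) // --detach cannot be combined with ... --watch
    }
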
diff --git a/cmd/compose/up_test.go b/cmd/compose/up_test.go
index dd8f9de412d..9019a40ff5a 100644
--- a/cmd/compose/up_test.go
+++ b/cmd/compose/up_test.go
@@ -19,25 +19,32 @@ package compose
import (
"testing"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"gotest.tools/v3/assert"
)
func TestApplyScaleOpt(t *testing.T) {
p := types.Project{
- Services: []types.ServiceConfig{
- {
+ Services: types.Services{
+ "foo": {
Name: "foo",
},
- {
+ "bar": {
Name: "bar",
+ Deploy: &types.DeployConfig{
+ Mode: "test",
+ },
},
},
}
- opt := upOptions{scale: []string{"foo=2"}}
- err := opt.apply(&p, nil)
+ err := applyScaleOpts(&p, []string{"foo=2", "bar=3"})
assert.NilError(t, err)
foo, err := p.GetService("foo")
assert.NilError(t, err)
- assert.Equal(t, *foo.Deploy.Replicas, uint64(2))
+ assert.Equal(t, *foo.Scale, 2)
+
+ bar, err := p.GetService("bar")
+ assert.NilError(t, err)
+ assert.Equal(t, *bar.Scale, 3)
+ assert.Equal(t, *bar.Deploy.Replicas, 3)
}
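
applyScaleOpts itself is defined elsewhere in this change; a hedged sketch of the SERVICE=NUM parsing it presumably performs before calling the new setServiceScale, based on the inline parsing removed from up.go above (assumes fmt, strconv and strings are imported):

    // Illustrative sketch only; not the actual applyScaleOpts implementation.
    func applyScaleOptsSketch(project *types.Project, opts []string) error {
    	for _, opt := range opts {
    		name, value, ok := strings.Cut(opt, "=")
    		if !ok {
    			return fmt.Errorf("invalid --scale option %q. Should be SERVICE=NUM", opt)
    		}
    		replicas, err := strconv.Atoi(value)
    		if err != nil {
    			return err
    		}
    		if err := setServiceScale(project, name, replicas); err != nil {
    			return err
    		}
    	}
    	return nil
    }
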
diff --git a/cmd/compose/version.go b/cmd/compose/version.go
index 30e0e753e57..86afcd189d9 100644
--- a/cmd/compose/version.go
+++ b/cmd/compose/version.go
@@ -20,11 +20,12 @@ import (
"fmt"
"strings"
- "github.com/docker/compose/v2/cmd/formatter"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/formatter"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/internal"
+ "github.com/docker/compose/v5/internal"
)
type versionOptions struct {
@@ -32,33 +33,38 @@ type versionOptions struct {
short bool
}
-func versionCommand() *cobra.Command {
+func versionCommand(dockerCli command.Cli) *cobra.Command {
opts := versionOptions{}
cmd := &cobra.Command{
- Use: "version",
+ Use: "version [OPTIONS]",
Short: "Show the Docker Compose version information",
- Args: cobra.MaximumNArgs(0),
+ Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
- runVersion(opts)
+ runVersion(opts, dockerCli)
+ return nil
+ },
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ // overwrite parent PersistentPreRunE to avoid trying to load
+ // compose file on version command if COMPOSE_FILE is set
return nil
},
}
// define flags for backward compatibility with com.docker.cli
flags := cmd.Flags()
flags.StringVarP(&opts.format, "format", "f", "", "Format the output. Values: [pretty | json]. (Default: pretty)")
- flags.BoolVar(&opts.short, "short", false, "Shows only Compose's version number.")
+ flags.BoolVar(&opts.short, "short", false, "Shows only Compose's version number")
return cmd
}
-func runVersion(opts versionOptions) {
+func runVersion(opts versionOptions, dockerCli command.Cli) {
if opts.short {
- fmt.Println(strings.TrimPrefix(internal.Version, "v"))
+ _, _ = fmt.Fprintln(dockerCli.Out(), strings.TrimPrefix(internal.Version, "v"))
return
}
if opts.format == formatter.JSON {
- fmt.Printf("{\"version\":%q}\n", internal.Version)
+ _, _ = fmt.Fprintf(dockerCli.Out(), "{\"version\":%q}\n", internal.Version)
return
}
- fmt.Println("Docker Compose version", internal.Version)
+ _, _ = fmt.Fprintln(dockerCli.Out(), "Docker Compose version", internal.Version)
}
diff --git a/cmd/compose/version_test.go b/cmd/compose/version_test.go
new file mode 100644
index 00000000000..cedaf63c478
--- /dev/null
+++ b/cmd/compose/version_test.go
@@ -0,0 +1,76 @@
+/*
+ Copyright 2025 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/compose/v5/internal"
+ "github.com/docker/compose/v5/pkg/mocks"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
+)
+
+func TestVersionCommand(t *testing.T) {
+ originalVersion := internal.Version
+ defer func() {
+ internal.Version = originalVersion
+ }()
+ internal.Version = "v9.9.9-test"
+
+ tests := []struct {
+ name string
+ args []string
+ want string
+ }{
+ {
+ name: "default",
+ args: []string{},
+ want: "Docker Compose version v9.9.9-test\n",
+ },
+ {
+ name: "short flag",
+ args: []string{"--short"},
+ want: "9.9.9-test\n",
+ },
+ {
+ name: "json flag",
+ args: []string{"--format", "json"},
+ want: `{"version":"v9.9.9-test"}` + "\n",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ buf := new(bytes.Buffer)
+ cli := mocks.NewMockCli(ctrl)
+ cli.EXPECT().Out().Return(streams.NewOut(buf)).AnyTimes()
+
+ cmd := versionCommand(cli)
+ cmd.SetArgs(test.args)
+ err := cmd.Execute()
+ assert.NilError(t, err)
+
+ assert.Equal(t, test.want, buf.String())
+ })
+ }
+}
diff --git a/cmd/compose/viz.go b/cmd/compose/viz.go
new file mode 100644
index 00000000000..a3e54883d38
--- /dev/null
+++ b/cmd/compose/viz.go
@@ -0,0 +1,104 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type vizOptions struct {
+ *ProjectOptions
+ includeNetworks bool
+ includePorts bool
+ includeImageName bool
+ indentationStr string
+}
+
+func vizCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := vizOptions{
+ ProjectOptions: p,
+ }
+ var indentationSize int
+ var useSpaces bool
+
+ cmd := &cobra.Command{
+ Use: "viz [OPTIONS]",
+ Short: "EXPERIMENTAL - Generate a graphviz graph from your compose file",
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ var err error
+ opts.indentationStr, err = preferredIndentationStr(indentationSize, useSpaces)
+ return err
+ }),
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runViz(ctx, dockerCli, backendOptions, &opts)
+ }),
+ }
+
+ cmd.Flags().BoolVar(&opts.includePorts, "ports", false, "Include service's exposed ports in output graph")
+ cmd.Flags().BoolVar(&opts.includeNetworks, "networks", false, "Include service's attached networks in output graph")
+ cmd.Flags().BoolVar(&opts.includeImageName, "image", false, "Include service's image name in output graph")
+ cmd.Flags().IntVar(&indentationSize, "indentation-size", 1, "Number of tabs or spaces to use for indentation")
+ cmd.Flags().BoolVar(&useSpaces, "spaces", false, "If given, space character ' ' will be used to indent,\notherwise tab character '\\t' will be used")
+ return cmd
+}
+
+func runViz(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts *vizOptions) error {
+ _, _ = fmt.Fprintln(os.Stderr, "viz command is EXPERIMENTAL")
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := opts.ToProject(ctx, dockerCli, backend, nil)
+ if err != nil {
+ return err
+ }
+
+ // build graph
+ graphStr, _ := backend.Viz(ctx, project, api.VizOptions{
+ IncludeNetworks: opts.includeNetworks,
+ IncludePorts: opts.includePorts,
+ IncludeImageName: opts.includeImageName,
+ Indentation: opts.indentationStr,
+ })
+
+ fmt.Println(graphStr)
+
+ return nil
+}
+
+// preferredIndentationStr returns a single string given the indentation preference
+func preferredIndentationStr(size int, useSpace bool) (string, error) {
+ if size < 0 {
+ return "", fmt.Errorf("invalid indentation size: %d", size)
+ }
+
+ indentationStr := "\t"
+ if useSpace {
+ indentationStr = " "
+ }
+ return strings.Repeat(indentationStr, size), nil
+}
diff --git a/cmd/compose/viz_test.go b/cmd/compose/viz_test.go
new file mode 100644
index 00000000000..f4a90501e33
--- /dev/null
+++ b/cmd/compose/viz_test.go
@@ -0,0 +1,94 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPreferredIndentationStr(t *testing.T) {
+ type args struct {
+ size int
+ useSpace bool
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "should return '\\t\\t'",
+ args: args{
+ size: 2,
+ useSpace: false,
+ },
+ want: "\t\t",
+ wantErr: false,
+ },
+ {
+ name: "should return ' '",
+ args: args{
+ size: 4,
+ useSpace: true,
+ },
+ want: " ",
+ wantErr: false,
+ },
+ {
+ name: "should return ''",
+ args: args{
+ size: 0,
+ useSpace: false,
+ },
+ want: "",
+ wantErr: false,
+ },
+ {
+ name: "should return ''",
+ args: args{
+ size: 0,
+ useSpace: true,
+ },
+ want: "",
+ wantErr: false,
+ },
+ {
+ name: "should throw error because indentation size < 0",
+ args: args{
+ size: -1,
+ useSpace: false,
+ },
+ want: "",
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := preferredIndentationStr(tt.args.size, tt.args.useSpace)
+ if tt.wantErr {
+ require.Errorf(t, err, "preferredIndentationStr(%v, %v)", tt.args.size, tt.args.useSpace)
+ } else {
+ require.NoError(t, err)
+ assert.Equalf(t, tt.want, got, "preferredIndentationStr(%v, %v)", tt.args.size, tt.args.useSpace)
+ }
+ })
+ }
+}
diff --git a/cmd/compose/volumes.go b/cmd/compose/volumes.go
new file mode 100644
index 00000000000..eae4dbcbcd5
--- /dev/null
+++ b/cmd/compose/volumes.go
@@ -0,0 +1,97 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "slices"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/command/formatter"
+ "github.com/docker/cli/cli/flags"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type volumesOptions struct {
+ *ProjectOptions
+ Quiet bool
+ Format string
+}
+
+func volumesCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ options := volumesOptions{
+ ProjectOptions: p,
+ }
+
+ cmd := &cobra.Command{
+ Use: "volumes [OPTIONS] [SERVICE...]",
+ Short: "List volumes",
+ RunE: Adapt(func(ctx context.Context, args []string) error {
+ return runVol(ctx, dockerCli, backendOptions, args, options)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+
+ cmd.Flags().BoolVarP(&options.Quiet, "quiet", "q", false, "Only display volume names")
+ cmd.Flags().StringVar(&options.Format, "format", "table", flags.FormatHelp)
+
+ return cmd
+}
+
+func runVol(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, services []string, options volumesOptions) error {
+ project, name, err := options.projectOrName(ctx, dockerCli, services...)
+ if err != nil {
+ return err
+ }
+
+ if project != nil {
+ names := project.ServiceNames()
+ for _, service := range services {
+ if !slices.Contains(names, service) {
+ return fmt.Errorf("no such service: %s", service)
+ }
+ }
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+ volumes, err := backend.Volumes(ctx, name, api.VolumesOptions{
+ Services: services,
+ })
+ if err != nil {
+ return err
+ }
+
+ if options.Quiet {
+ for _, v := range volumes {
+ _, _ = fmt.Fprintln(dockerCli.Out(), v.Name)
+ }
+ return nil
+ }
+
+ volumeCtx := formatter.Context{
+ Output: dockerCli.Out(),
+ Format: formatter.NewVolumeFormat(options.Format, options.Quiet),
+ }
+
+ return formatter.VolumeWrite(volumeCtx, volumes)
+}
diff --git a/cmd/compose/wait.go b/cmd/compose/wait.go
new file mode 100644
index 00000000000..f8b30f69330
--- /dev/null
+++ b/cmd/compose/wait.go
@@ -0,0 +1,78 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "os"
+
+ "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+ "github.com/spf13/cobra"
+)
+
+type waitOptions struct {
+ *ProjectOptions
+
+ services []string
+
+ downProject bool
+}
+
+func waitCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ opts := waitOptions{
+ ProjectOptions: p,
+ }
+
+ var statusCode int64
+ var err error
+ cmd := &cobra.Command{
+ Use: "wait SERVICE [SERVICE...] [OPTIONS]",
+ Short: "Block until containers of all (or specified) services stop.",
+ Args: cli.RequiresMinArgs(1),
+ RunE: Adapt(func(ctx context.Context, services []string) error {
+ opts.services = services
+ statusCode, err = runWait(ctx, dockerCli, backendOptions, &opts)
+ return err
+ }),
+ PostRun: func(cmd *cobra.Command, args []string) {
+ os.Exit(int(statusCode))
+ },
+ }
+
+ cmd.Flags().BoolVar(&opts.downProject, "down-project", false, "Drops project when the first container stops")
+
+ return cmd
+}
+
+func runWait(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, opts *waitOptions) (int64, error) {
+ _, name, err := opts.projectOrName(ctx, dockerCli)
+ if err != nil {
+ return 0, err
+ }
+
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return 0, err
+ }
+ return backend.Wait(ctx, name, api.WaitOptions{
+ Services: opts.services,
+ DownProjectOnContainerExit: opts.downProject,
+ })
+}
diff --git a/cmd/compose/watch.go b/cmd/compose/watch.go
new file mode 100644
index 00000000000..b77617e4cf3
--- /dev/null
+++ b/cmd/compose/watch.go
@@ -0,0 +1,132 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/pkg/compose"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/internal/locker"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type watchOptions struct {
+ *ProjectOptions
+ prune bool
+ noUp bool
+}
+
+func watchCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {
+ watchOpts := watchOptions{
+ ProjectOptions: p,
+ }
+ buildOpts := buildOptions{
+ ProjectOptions: p,
+ }
+ cmd := &cobra.Command{
+ Use: "watch [SERVICE...]",
+ Short: "Watch build context for service and rebuild/refresh containers when files are updated",
+ PreRunE: Adapt(func(ctx context.Context, args []string) error {
+ return nil
+ }),
+ RunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
+ if cmd.Parent().Name() == "alpha" {
+ logrus.Warn("watch command is now available as a top level command")
+ }
+ return runWatch(ctx, dockerCli, backendOptions, watchOpts, buildOpts, args)
+ }),
+ ValidArgsFunction: completeServiceNames(dockerCli, p),
+ }
+
+ cmd.Flags().BoolVar(&buildOpts.quiet, "quiet", false, "Hide build output")
+ cmd.Flags().BoolVar(&watchOpts.prune, "prune", true, "Prune dangling images on rebuild")
+ cmd.Flags().BoolVar(&watchOpts.noUp, "no-up", false, "Do not build & start services before watching")
+ return cmd
+}
+
+func runWatch(ctx context.Context, dockerCli command.Cli, backendOptions *BackendOptions, watchOpts watchOptions, buildOpts buildOptions, services []string) error {
+ backend, err := compose.NewComposeService(dockerCli, backendOptions.Options...)
+ if err != nil {
+ return err
+ }
+
+ project, _, err := watchOpts.ToProject(ctx, dockerCli, backend, services)
+ if err != nil {
+ return err
+ }
+
+ if err := applyPlatforms(project, true); err != nil {
+ return err
+ }
+
+ build, err := buildOpts.toAPIBuildOptions(nil)
+ if err != nil {
+ return err
+ }
+
+ // validation done -- ensure we have the lockfile for this project before doing work
+ l, err := locker.NewPidfile(project.Name)
+ if err != nil {
+ return fmt.Errorf("cannot take exclusive lock for project %q: %w", project.Name, err)
+ }
+ if err := l.Lock(); err != nil {
+ return fmt.Errorf("cannot take exclusive lock for project %q: %w", project.Name, err)
+ }
+
+ if !watchOpts.noUp {
+ for index, service := range project.Services {
+ if service.Build != nil && service.Develop != nil {
+ service.PullPolicy = types.PullPolicyBuild
+ }
+ project.Services[index] = service
+ }
+ upOpts := api.UpOptions{
+ Create: api.CreateOptions{
+ Build: &build,
+ Services: services,
+ RemoveOrphans: false,
+ Recreate: api.RecreateDiverged,
+ RecreateDependencies: api.RecreateNever,
+ Inherit: true,
+ QuietPull: buildOpts.quiet,
+ },
+ Start: api.StartOptions{
+ Project: project,
+ Attach: nil,
+ Services: services,
+ },
+ }
+ if err := backend.Up(ctx, project, upOpts); err != nil {
+ return err
+ }
+ }
+
+ consumer := formatter.NewLogConsumer(ctx, dockerCli.Out(), dockerCli.Err(), false, false, false)
+ return backend.Watch(ctx, project, api.WatchOptions{
+ Build: &build,
+ LogTo: consumer,
+ Prune: watchOpts.prune,
+ Services: services,
+ })
+}
diff --git a/cmd/display/colors.go b/cmd/display/colors.go
new file mode 100644
index 00000000000..a00b4ed6ad5
--- /dev/null
+++ b/cmd/display/colors.go
@@ -0,0 +1,47 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package display
+
+import (
+ "github.com/morikuni/aec"
+)
+
+type colorFunc func(string) string
+
+var (
+ nocolor colorFunc = func(s string) string {
+ return s
+ }
+
+ DoneColor colorFunc = aec.BlueF.Apply
+ TimerColor colorFunc = aec.BlueF.Apply
+ CountColor colorFunc = aec.YellowF.Apply
+ WarningColor colorFunc = aec.YellowF.With(aec.Bold).Apply
+ SuccessColor colorFunc = aec.GreenF.Apply
+ ErrorColor colorFunc = aec.RedF.With(aec.Bold).Apply
+ PrefixColor colorFunc = aec.CyanF.Apply
+)
+
+func NoColor() {
+ DoneColor = nocolor
+ TimerColor = nocolor
+ CountColor = nocolor
+ WarningColor = nocolor
+ SuccessColor = nocolor
+ ErrorColor = nocolor
+ PrefixColor = nocolor
+}
diff --git a/scripts/validate/template/go.txt b/cmd/display/dryrun.go
similarity index 90%
rename from scripts/validate/template/go.txt
rename to cmd/display/dryrun.go
index a3ee08615e3..2ab542e5b05 100644
--- a/scripts/validate/template/go.txt
+++ b/cmd/display/dryrun.go
@@ -14,3 +14,8 @@
limitations under the License.
*/
+package display
+
+const (
+ DRYRUN_PREFIX = " DRY-RUN MODE - "
+)
diff --git a/cmd/display/json.go b/cmd/display/json.go
new file mode 100644
index 00000000000..b8873596374
--- /dev/null
+++ b/cmd/display/json.go
@@ -0,0 +1,81 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package display
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+func JSON(out io.Writer) api.EventProcessor {
+ return &jsonWriter{
+ out: out,
+ }
+}
+
+type jsonWriter struct {
+ out io.Writer
+ dryRun bool
+}
+
+type jsonMessage struct {
+ DryRun bool `json:"dry-run,omitempty"`
+ Tail bool `json:"tail,omitempty"`
+ ID string `json:"id,omitempty"`
+ ParentID string `json:"parent_id,omitempty"`
+ Status string `json:"status,omitempty"`
+ Text string `json:"text,omitempty"`
+ Details string `json:"details,omitempty"`
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Percent int `json:"percent,omitempty"`
+}
+
+func (p *jsonWriter) Start(ctx context.Context, operation string) {
+}
+
+func (p *jsonWriter) Event(e api.Resource) {
+ message := &jsonMessage{
+ DryRun: p.dryRun,
+ Tail: false,
+ ID: e.ID,
+ Status: e.StatusText(),
+ Text: e.Text,
+ Details: e.Details,
+ ParentID: e.ParentID,
+ Current: e.Current,
+ Total: e.Total,
+ Percent: e.Percent,
+ }
+ marshal, err := json.Marshal(message)
+ if err == nil {
+ _, _ = fmt.Fprintln(p.out, string(marshal))
+ }
+}
+
+func (p *jsonWriter) On(events ...api.Resource) {
+ for _, e := range events {
+ p.Event(e)
+ }
+}
+
+func (p *jsonWriter) Done(_ string, _ bool) {
+}
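
To make the wire format concrete, a small consumer-side sketch (assumed usage, based on the jsonMessage struct above; On is assumed to be part of api.EventProcessor as implemented here):

    // Sketch: feeding one event to the JSON processor prints a single JSON line.
    func jsonOutputSketch() {
    	p := display.JSON(os.Stdout)
    	p.On(api.Resource{ID: "db", Status: api.Working, Text: api.StatusCreating})
    	// prints something like: {"id":"db","status":"Working","text":"Creating"}
    }
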
diff --git a/cmd/display/json_test.go b/cmd/display/json_test.go
new file mode 100644
index 00000000000..26895dc1022
--- /dev/null
+++ b/cmd/display/json_test.go
@@ -0,0 +1,61 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package display
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "gotest.tools/v3/assert"
+)
+
+func TestJsonWriter_Event(t *testing.T) {
+ var out bytes.Buffer
+ w := &jsonWriter{
+ out: &out,
+ dryRun: true,
+ }
+
+ event := api.Resource{
+ ID: "service1",
+ ParentID: "project",
+ Status: api.Working,
+ Text: api.StatusCreating,
+ Current: 50,
+ Total: 100,
+ Percent: 50,
+ }
+ w.Event(event)
+
+ var actual jsonMessage
+ err := json.Unmarshal(out.Bytes(), &actual)
+ assert.NilError(t, err)
+
+ expected := jsonMessage{
+ DryRun: true,
+ ID: event.ID,
+ ParentID: event.ParentID,
+ Text: api.StatusCreating,
+ Status: "Working",
+ Current: event.Current,
+ Total: event.Total,
+ Percent: event.Percent,
+ }
+ assert.DeepEqual(t, expected, actual)
+}
diff --git a/cmd/display/mode.go b/cmd/display/mode.go
new file mode 100644
index 00000000000..d66777b472c
--- /dev/null
+++ b/cmd/display/mode.go
@@ -0,0 +1,33 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package display
+
+// Mode defines how progress should be rendered: ModeAuto, ModeTTY, ModePlain, ModeQuiet or ModeJSON
+var Mode = ModeAuto
+
+const (
+ // ModeAuto detect console capabilities
+ ModeAuto = "auto"
+ // ModeTTY use terminal capability for advanced rendering
+ ModeTTY = "tty"
+ // ModePlain dump raw events to output
+ ModePlain = "plain"
+ // ModeQuiet don't display events
+ ModeQuiet = "quiet"
+ // ModeJSON outputs a machine-readable JSON stream
+ ModeJSON = "json"
+)
diff --git a/pkg/progress/plain.go b/cmd/display/plain.go
similarity index 59%
rename from pkg/progress/plain.go
rename to cmd/display/plain.go
index 3524074da23..16f2816c011 100644
--- a/pkg/progress/plain.go
+++ b/cmd/display/plain.go
@@ -14,42 +14,43 @@
limitations under the License.
*/
-package progress
+package display
import (
"context"
"fmt"
"io"
+
+ "github.com/docker/compose/v5/pkg/api"
)
+func Plain(out io.Writer) api.EventProcessor {
+ return &plainWriter{
+ out: out,
+ }
+}
+
type plainWriter struct {
- out io.Writer
- done chan bool
+ out io.Writer
+ dryRun bool
}
-func (p *plainWriter) Start(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-p.done:
- return nil
- }
+func (p *plainWriter) Start(ctx context.Context, operation string) {
}
-func (p *plainWriter) Event(e Event) {
- fmt.Fprintln(p.out, e.ID, e.Text, e.StatusText)
+func (p *plainWriter) Event(e api.Resource) {
+ prefix := ""
+ if p.dryRun {
+ prefix = DRYRUN_PREFIX
+ }
+ _, _ = fmt.Fprintln(p.out, prefix, e.ID, e.Text, e.Details)
}
-func (p *plainWriter) Events(events []Event) {
+func (p *plainWriter) On(events ...api.Resource) {
for _, e := range events {
p.Event(e)
}
}
-func (p *plainWriter) TailMsgf(m string, args ...interface{}) {
- fmt.Fprintln(p.out, append([]interface{}{m}, args...)...)
-}
-
-func (p *plainWriter) Stop() {
- p.done <- true
+func (p *plainWriter) Done(_ string, _ bool) {
}
diff --git a/pkg/progress/noop.go b/cmd/display/quiet.go
similarity index 69%
rename from pkg/progress/noop.go
rename to cmd/display/quiet.go
index 98852705c9a..8e1537d8061 100644
--- a/pkg/progress/noop.go
+++ b/cmd/display/quiet.go
@@ -14,27 +14,25 @@
limitations under the License.
*/
-package progress
+package display
import (
"context"
-)
-type noopWriter struct {
-}
+ "github.com/docker/compose/v5/pkg/api"
+)
-func (p *noopWriter) Start(ctx context.Context) error {
- return nil
+func Quiet() api.EventProcessor {
+ return &quiet{}
}
-func (p *noopWriter) Event(Event) {
-}
+type quiet struct{}
-func (p *noopWriter) Events([]Event) {
+func (q *quiet) Start(_ context.Context, _ string) {
}
-func (p *noopWriter) TailMsgf(_ string, _ ...interface{}) {
+func (q *quiet) Done(_ string, _ bool) {
}
-func (p *noopWriter) Stop() {
+func (q *quiet) On(_ ...api.Resource) {
}
diff --git a/pkg/progress/spinner.go b/cmd/display/spinner.go
similarity index 85%
rename from pkg/progress/spinner.go
rename to cmd/display/spinner.go
index 3e757acf475..e476deae80f 100644
--- a/pkg/progress/spinner.go
+++ b/cmd/display/spinner.go
@@ -14,14 +14,14 @@
limitations under the License.
*/
-package progress
+package display
import (
"runtime"
"time"
)
-type spinner struct {
+type Spinner struct {
time time.Time
index int
chars []string
@@ -29,7 +29,7 @@ type spinner struct {
done string
}
-func newSpinner() *spinner {
+func NewSpinner() *Spinner {
chars := []string{
"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏",
}
@@ -40,7 +40,7 @@ func newSpinner() *spinner {
done = "-"
}
- return &spinner{
+ return &Spinner{
index: 0,
time: time.Now(),
chars: chars,
@@ -48,7 +48,7 @@ func newSpinner() *spinner {
}
}
-func (s *spinner) String() string {
+func (s *Spinner) String() string {
if s.stop {
return s.done
}
@@ -61,6 +61,10 @@ func (s *spinner) String() string {
return s.chars[s.index]
}
-func (s *spinner) Stop() {
+func (s *Spinner) Stop() {
s.stop = true
}
+
+func (s *Spinner) Restart() {
+ s.stop = false
+}
diff --git a/cmd/display/tty.go b/cmd/display/tty.go
new file mode 100644
index 00000000000..0cb15193000
--- /dev/null
+++ b/cmd/display/tty.go
@@ -0,0 +1,436 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package display
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/docker/compose/v5/pkg/api"
+
+ "github.com/buger/goterm"
+ "github.com/docker/go-units"
+ "github.com/morikuni/aec"
+)
+
+// Full creates an EventProcessor that renders an advanced UI within a terminal.
+// On Start, the TUI lists tasks with a progress timer.
+func Full(out io.Writer, info io.Writer) api.EventProcessor {
+ return &ttyWriter{
+ out: out,
+ info: info,
+ tasks: map[string]task{},
+ done: make(chan bool),
+ mtx: &sync.Mutex{},
+ }
+}
+
+type ttyWriter struct {
+ out io.Writer
+ ids []string // task IDs, ordered by first appearance
+ tasks map[string]task
+ repeated bool
+ numLines int
+ done chan bool
+ mtx *sync.Mutex
+ dryRun bool // FIXME(ndeloof) (re)implement support for dry-run
+ skipChildEvents bool
+ operation string
+ ticker *time.Ticker
+ suspended bool
+ info io.Writer
+}
+
+type task struct {
+ ID string
+ parentID string
+ startTime time.Time
+ endTime time.Time
+ text string
+ details string
+ status api.EventStatus
+ current int64
+ percent int
+ total int64
+ spinner *Spinner
+}
+
+func (t *task) stop() {
+ t.endTime = time.Now()
+ t.spinner.Stop()
+}
+
+func (t *task) hasMore() {
+ t.spinner.Restart()
+}
+
+func (w *ttyWriter) Start(ctx context.Context, operation string) {
+ w.ticker = time.NewTicker(100 * time.Millisecond)
+ w.operation = operation
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ // interrupted
+ w.ticker.Stop()
+ return
+ case <-w.done:
+ w.print()
+ w.mtx.Lock()
+ w.ticker.Stop()
+ w.operation = ""
+ w.mtx.Unlock()
+ return
+ case <-w.ticker.C:
+ w.print()
+ }
+ }
+ }()
+}
+
+func (w *ttyWriter) Done(operation string, success bool) {
+ w.done <- true
+}
+
+func (w *ttyWriter) On(events ...api.Resource) {
+ w.mtx.Lock()
+ defer w.mtx.Unlock()
+ for _, e := range events {
+ if e.ID == "Compose" {
+ _, _ = fmt.Fprintln(w.info, ErrorColor(e.Details))
+ continue
+ }
+
+ if w.operation != "start" && (e.Text == api.StatusStarted || e.Text == api.StatusStarting) {
+ // skip those events to avoid mixing them with container logs
+ continue
+ }
+ w.event(e)
+ }
+}
+
+func (w *ttyWriter) event(e api.Resource) {
+ // Suspend printing while a build is in progress, to avoid colliding with the buildkit Display
+ if e.Text == api.StatusBuilding {
+ w.ticker.Stop()
+ w.suspended = true
+ } else if w.suspended {
+ w.ticker.Reset(100 * time.Millisecond)
+ w.suspended = false
+ }
+
+ if last, ok := w.tasks[e.ID]; ok {
+ switch e.Status {
+ case api.Done, api.Error, api.Warning:
+ if last.status != e.Status {
+ last.stop()
+ }
+ case api.Working:
+ last.hasMore()
+ }
+ last.status = e.Status
+ last.text = e.Text
+ last.details = e.Details
+ // progress can only go up
+ if e.Total > last.total {
+ last.total = e.Total
+ }
+ if e.Current > last.current {
+ last.current = e.Current
+ }
+ if e.Percent > last.percent {
+ last.percent = e.Percent
+ }
+ // allow setting/unsetting the parent, but not swapping it, otherwise the prompt flickers
+ if last.parentID == "" || e.ParentID == "" {
+ last.parentID = e.ParentID
+ }
+ w.tasks[e.ID] = last
+ } else {
+ t := task{
+ ID: e.ID,
+ parentID: e.ParentID,
+ startTime: time.Now(),
+ text: e.Text,
+ details: e.Details,
+ status: e.Status,
+ current: e.Current,
+ percent: e.Percent,
+ total: e.Total,
+ spinner: NewSpinner(),
+ }
+ if e.Status == api.Done || e.Status == api.Error {
+ t.stop()
+ }
+ w.tasks[e.ID] = t
+ w.ids = append(w.ids, e.ID)
+ }
+ w.printEvent(e)
+}
+
+func (w *ttyWriter) printEvent(e api.Resource) {
+ if w.operation != "" {
+ // event will be displayed by progress UI on ticker's ticks
+ return
+ }
+
+ var color colorFunc
+ switch e.Status {
+ case api.Working:
+ color = SuccessColor
+ case api.Done:
+ color = SuccessColor
+ case api.Warning:
+ color = WarningColor
+ case api.Error:
+ color = ErrorColor
+ }
+ _, _ = fmt.Fprintf(w.out, "%s %s %s\n", e.ID, color(e.Text), e.Details)
+}
+
+func (w *ttyWriter) print() {
+ w.mtx.Lock()
+ defer w.mtx.Unlock()
+ if len(w.tasks) == 0 {
+ return
+ }
+ terminalWidth := goterm.Width()
+ b := aec.EmptyBuilder
+ for i := 0; i <= w.numLines; i++ {
+ b = b.Up(1)
+ }
+ if !w.repeated {
+ b = b.Down(1)
+ }
+ w.repeated = true
+ _, _ = fmt.Fprint(w.out, b.Column(0).ANSI)
+
+ // Hide the cursor while we are printing
+ _, _ = fmt.Fprint(w.out, aec.Hide)
+ defer func() {
+ _, _ = fmt.Fprint(w.out, aec.Show)
+ }()
+
+ firstLine := fmt.Sprintf("[+] %s %d/%d", w.operation, numDone(w.tasks), len(w.tasks))
+ _, _ = fmt.Fprintln(w.out, firstLine)
+
+ var statusPadding int
+ for _, t := range w.tasks {
+ l := len(t.ID)
+ if statusPadding < l {
+ statusPadding = l
+ }
+ if t.parentID != "" {
+ statusPadding -= 2
+ }
+ }
+
+ if len(w.tasks) > goterm.Height()-2 {
+ w.skipChildEvents = true
+ }
+ numLines := 0
+
+ for _, id := range w.ids { // iterate on ids to enforce a consistent order
+ t := w.tasks[id]
+ if t.parentID != "" {
+ continue
+ }
+ line := w.lineText(t, "", terminalWidth, statusPadding, w.dryRun)
+ _, _ = fmt.Fprint(w.out, line)
+ numLines++
+ for _, childID := range w.ids { // keep the same consistent order for child tasks
+ child := w.tasks[childID]
+ if child.parentID == t.ID {
+ if w.skipChildEvents {
+ continue
+ }
+ line := w.lineText(child, " ", terminalWidth, statusPadding, w.dryRun)
+ _, _ = fmt.Fprint(w.out, line)
+ numLines++
+ }
+ }
+ }
+ for i := numLines; i < w.numLines; i++ {
+ if numLines < goterm.Height()-2 {
+ _, _ = fmt.Fprintln(w.out, strings.Repeat(" ", terminalWidth))
+ numLines++
+ }
+ }
+ w.numLines = numLines
+}
+
+func (w *ttyWriter) lineText(t task, pad string, terminalWidth, statusPadding int, dryRun bool) string {
+ endTime := time.Now()
+ if t.status != api.Working {
+ endTime = t.startTime
+ if (t.endTime != time.Time{}) {
+ endTime = t.endTime
+ }
+ }
+ prefix := ""
+ if dryRun {
+ prefix = PrefixColor(DRYRUN_PREFIX)
+ }
+
+ elapsed := endTime.Sub(t.startTime).Seconds()
+
+ var (
+ hideDetails bool
+ total int64
+ current int64
+ completion []string
+ )
+
+ // only show the aggregated progress while the root operation is in-progress
+ if parent := t; parent.status == api.Working {
+ for _, id := range w.ids {
+ child := w.tasks[id]
+ if child.parentID == parent.ID {
+ if child.status == api.Working && child.total == 0 {
+ // we don't have totals available for all the child events
+ // so don't show the total progress yet
+ hideDetails = true
+ }
+ total += child.total
+ current += child.current
+ completion = append(completion, percentChars[(len(percentChars)-1)*child.percent/100])
+ }
+ }
+ }
+
+ // don't try to show detailed progress if we don't have any idea
+ if total == 0 {
+ hideDetails = true
+ }
+
+ txt := t.ID
+ if len(completion) > 0 {
+ var progress string
+ if !hideDetails {
+ progress = fmt.Sprintf(" %7s / %-7s", units.HumanSize(float64(current)), units.HumanSize(float64(total)))
+ }
+ txt = fmt.Sprintf("%s [%s]%s",
+ t.ID,
+ SuccessColor(strings.Join(completion, "")),
+ progress,
+ )
+ }
+ textLen := len(txt)
+ padding := statusPadding - textLen
+ if padding < 0 {
+ padding = 0
+ }
+ // calculate the max length for the status text, on errors it
+ // is 2-3 lines long and breaks the line formatting
+ maxDetailsLen := terminalWidth - textLen - statusPadding - 15
+ details := t.details
+ // in some cases (e.g. debugging under VS Code), goterm.Width() reports a zero terminal width; guard against truncating with a negative index
+ if maxDetailsLen > 0 && len(details) > maxDetailsLen {
+ details = details[:maxDetailsLen] + "..."
+ }
+ text := fmt.Sprintf("%s %s%s %s %s%s %s",
+ pad,
+ spinner(t),
+ prefix,
+ txt,
+ strings.Repeat(" ", padding),
+ colorFn(t.status)(t.text),
+ details,
+ )
+ timer := fmt.Sprintf("%.1fs ", elapsed)
+ o := align(text, TimerColor(timer), terminalWidth)
+
+ return o
+}
+
+var (
+ spinnerDone = "✔"
+ spinnerWarning = "!"
+ spinnerError = "✘"
+)
+
+func spinner(t task) string {
+ switch t.status {
+ case api.Done:
+ return SuccessColor(spinnerDone)
+ case api.Warning:
+ return WarningColor(spinnerWarning)
+ case api.Error:
+ return ErrorColor(spinnerError)
+ default:
+ return CountColor(t.spinner.String())
+ }
+}
+
+func colorFn(s api.EventStatus) colorFunc {
+ switch s {
+ case api.Done:
+ return SuccessColor
+ case api.Warning:
+ return WarningColor
+ case api.Error:
+ return ErrorColor
+ default:
+ return nocolor
+ }
+}
+
+func numDone(tasks map[string]task) int {
+ i := 0
+ for _, t := range tasks {
+ if t.status != api.Working {
+ i++
+ }
+ }
+ return i
+}
+
+func align(l, r string, w int) string {
+ ll := lenAnsi(l)
+ lr := lenAnsi(r)
+ pad := ""
+ count := w - ll - lr
+ if count > 0 {
+ pad = strings.Repeat(" ", count)
+ }
+ return fmt.Sprintf("%s%s%s\n", l, pad, r)
+}
+
+// lenAnsi returns the count of user-perceived characters in an ANSI string.
+func lenAnsi(s string) int {
+ length := 0
+ ansiCode := false
+ for _, r := range s {
+ if r == '\x1b' {
+ ansiCode = true
+ continue
+ }
+ if ansiCode && r == 'm' {
+ ansiCode = false
+ continue
+ }
+ if !ansiCode {
+ length++
+ }
+ }
+ return length
+}
+
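+// percentChars are the glyphs used to render a child event's completion, from 0% (blank braille cell) to 100% (full braille cell).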
+var percentChars = strings.Split("⠀⡀⣀⣄⣤⣦⣶⣷⣿", "")
diff --git a/cmd/formatter/ansi.go b/cmd/formatter/ansi.go
new file mode 100644
index 00000000000..14429687bc4
--- /dev/null
+++ b/cmd/formatter/ansi.go
@@ -0,0 +1,100 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package formatter
+
+import (
+ "fmt"
+
+ "github.com/acarl005/stripansi"
+)
+
+var disableAnsi bool
+
+func ansi(code string) string {
+ return fmt.Sprintf("\033%s", code)
+}
+
+func saveCursor() {
+ if disableAnsi {
+ return
+ }
+ fmt.Print(ansi("7"))
+}
+
+func restoreCursor() {
+ if disableAnsi {
+ return
+ }
+ fmt.Print(ansi("8"))
+}
+
+func showCursor() {
+ if disableAnsi {
+ return
+ }
+ fmt.Print(ansi("[?25h"))
+}
+
+func moveCursor(y, x int) {
+ if disableAnsi {
+ return
+ }
+ fmt.Print(ansi(fmt.Sprintf("[%d;%dH", y, x)))
+}
+
+func carriageReturn() {
+ if disableAnsi {
+ return
+ }
+ fmt.Print(ansi(fmt.Sprintf("[%dG", 0)))
+}
+
+func clearLine() {
+ if disableAnsi {
+ return
+ }
+ // Does not move cursor from its current position
+ fmt.Print(ansi("[2K"))
+}
+
+func moveCursorUp(lines int) {
+ if disableAnsi {
+ return
+ }
+ // Does not add new lines
+ fmt.Print(ansi(fmt.Sprintf("[%dA", lines)))
+}
+
+func moveCursorDown(lines int) {
+ if disableAnsi {
+ return
+ }
+ // Does not add new lines
+ fmt.Print(ansi(fmt.Sprintf("[%dB", lines)))
+}
+
+func newLine() {
+ // Like \n
+ fmt.Print("\012")
+}
+
+func lenAnsi(s string) int {
+ // len counts ANSI escape sequences too; to get the user-perceived
+ // length of the string we strip all ANSI codes first
+ return len(stripansi.Strip(s))
+}
diff --git a/cmd/formatter/colors.go b/cmd/formatter/colors.go
index 8c24808829f..ea0e1a26362 100644
--- a/cmd/formatter/colors.go
+++ b/cmd/formatter/colors.go
@@ -18,10 +18,11 @@ package formatter
import (
"fmt"
- "os"
"strconv"
+ "strings"
+ "sync"
- "github.com/mattn/go-isatty"
+ "github.com/docker/cli/cli/command"
)
var names = []string{
@@ -35,6 +36,18 @@ var names = []string{
"white",
}
+const (
+ BOLD = "1"
+ FAINT = "2"
+ ITALIC = "3"
+ UNDERLINE = "4"
+)
+
+const (
+ RESET = "0"
+ CYAN = "36"
+)
+
const (
// Never use ANSI codes
Never = "never"
@@ -46,21 +59,25 @@ const (
Auto = "auto"
)
+// ansiColorOffset is the offset for basic foreground colors in ANSI escape codes.
+const ansiColorOffset = 30
+
// SetANSIMode configure formatter for colored output on ANSI-compliant console
-func SetANSIMode(ansi string) {
- if !useAnsi(ansi) {
+func SetANSIMode(streams command.Streams, ansi string) {
+ if !useAnsi(streams, ansi) {
nextColor = func() colorFunc {
return monochrome
}
+ disableAnsi = true
}
}
-func useAnsi(ansi string) bool {
+func useAnsi(streams command.Streams, ansi string) bool {
switch ansi {
case Always:
return true
case Auto:
- return isatty.IsTerminal(os.Stdout.Fd())
+ return streams.Out().IsTerminal()
}
return false
}
@@ -72,12 +89,21 @@ var monochrome = func(s string) string {
return s
}
-func ansiColor(code, s string) string {
- return fmt.Sprintf("%s%s%s", ansi(code), s, ansi("0"))
+func ansiColor(code, s string, formatOpts ...string) string {
+ return fmt.Sprintf("%s%s%s", ansiColorCode(code, formatOpts...), s, ansiColorCode("0"))
}
-func ansi(code string) string {
- return fmt.Sprintf("\033[%sm", code)
+// See https://hyperskill.org/learn/step/18193 for background on ANSI color codes
+func ansiColorCode(code string, formatOpts ...string) string {
+ var sb strings.Builder
+ sb.WriteString("\033[")
+ for _, c := range formatOpts {
+ sb.WriteString(c)
+ sb.WriteString(";")
+ }
+ sb.WriteString(code)
+ sb.WriteString("m")
+ return sb.String()
}
func makeColorFunc(code string) colorFunc {
@@ -86,39 +112,37 @@ func makeColorFunc(code string) colorFunc {
}
}
-var nextColor = rainbowColor
+var (
+ nextColor = rainbowColor
+ rainbow []colorFunc
+ currentIndex = 0
+ mutex sync.Mutex
+)
func rainbowColor() colorFunc {
- return <-loop
+ mutex.Lock()
+ defer mutex.Unlock()
+ result := rainbow[currentIndex]
+ currentIndex = (currentIndex + 1) % len(rainbow)
+ return result
}
-var loop = make(chan colorFunc)
-
func init() {
colors := map[string]colorFunc{}
for i, name := range names {
- colors[name] = makeColorFunc(strconv.Itoa(30 + i))
- colors["intense_"+name] = makeColorFunc(strconv.Itoa(30+i) + ";1")
+ colors[name] = makeColorFunc(strconv.Itoa(ansiColorOffset + i))
+ colors["intense_"+name] = makeColorFunc(strconv.Itoa(ansiColorOffset+i) + ";1")
+ }
+ rainbow = []colorFunc{
+ colors["cyan"],
+ colors["yellow"],
+ colors["green"],
+ colors["magenta"],
+ colors["blue"],
+ colors["intense_cyan"],
+ colors["intense_yellow"],
+ colors["intense_green"],
+ colors["intense_magenta"],
+ colors["intense_blue"],
}
-
- go func() {
- i := 0
- rainbow := []colorFunc{
- colors["cyan"],
- colors["yellow"],
- colors["green"],
- colors["magenta"],
- colors["blue"],
- colors["intense_cyan"],
- colors["intense_yellow"],
- colors["intense_green"],
- colors["intense_magenta"],
- colors["intense_blue"],
- }
-
- for {
- loop <- rainbow[i]
- i = (i + 1) % len(rainbow)
- }
- }()
}
diff --git a/cmd/formatter/consts.go b/cmd/formatter/consts.go
index 0bb06bf7cb1..c60338ad9bf 100644
--- a/cmd/formatter/consts.go
+++ b/cmd/formatter/consts.go
@@ -17,10 +17,13 @@
package formatter
const (
- // JSON is the constant for Json formats on list commands
+ // JSON Print in JSON format
JSON = "json"
// TemplateLegacyJSON the legacy json formatting value using go template
TemplateLegacyJSON = "{{json.}}"
// PRETTY is the constant for default formats on list commands
+ // Deprecated: use TABLE
PRETTY = "pretty"
+ // TABLE Print output in table format with column headers (default)
+ TABLE = "table"
)
diff --git a/cmd/formatter/container.go b/cmd/formatter/container.go
new file mode 100644
index 00000000000..074ef00c18b
--- /dev/null
+++ b/cmd/formatter/container.go
@@ -0,0 +1,287 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package formatter
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/cli/cli/command/formatter"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/go-units"
+)
+
+const (
+ defaultContainerTableFormat = "table {{.Name}}\t{{.Image}}\t{{.Command}}\t{{.Service}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}"
+
+ nameHeader = "NAME"
+ projectHeader = "PROJECT"
+ serviceHeader = "SERVICE"
+ commandHeader = "COMMAND"
+ runningForHeader = "CREATED"
+ mountsHeader = "MOUNTS"
+ localVolumes = "LOCAL VOLUMES"
+ networksHeader = "NETWORKS"
+)
+
+// NewContainerFormat returns a Format for rendering using a Context
+func NewContainerFormat(source string, quiet bool, size bool) formatter.Format {
+ switch source {
+ case formatter.TableFormatKey, "": // table formatting is the default if none is set.
+ if quiet {
+ return formatter.DefaultQuietFormat
+ }
+ format := defaultContainerTableFormat
+ if size {
+ format += `\t{{.Size}}`
+ }
+ return formatter.Format(format)
+ case formatter.RawFormatKey:
+ if quiet {
+ return `container_id: {{.ID}}`
+ }
+ format := `container_id: {{.ID}}
+image: {{.Image}}
+command: {{.Command}}
+created_at: {{.CreatedAt}}
+state: {{- pad .State 1 0}}
+status: {{- pad .Status 1 0}}
+names: {{.Names}}
+labels: {{- pad .Labels 1 0}}
+ports: {{- pad .Ports 1 0}}
+`
+ if size {
+ format += `size: {{.Size}}\n`
+ }
+ return formatter.Format(format)
+ default: // custom format
+ if quiet {
+ return formatter.DefaultQuietFormat
+ }
+ return formatter.Format(source)
+ }
+}
+
+// ContainerWrite renders the context for a list of containers
+func ContainerWrite(ctx formatter.Context, containers []api.ContainerSummary) error {
+ render := func(format func(subContext formatter.SubContext) error) error {
+ for _, container := range containers {
+ err := format(&ContainerContext{trunc: ctx.Trunc, c: container})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ return ctx.Write(NewContainerContext(), render)
+}
+
+// ContainerContext is a struct used for rendering a list of containers in a Go template.
+type ContainerContext struct {
+ formatter.HeaderContext
+ trunc bool
+ c api.ContainerSummary
+
+ // FieldsUsed is used in the pre-processing step to detect which fields are
+ // used in the template. It's currently only used to detect use of the .Size
+ // field which (if used) automatically sets the '--size' option when making
+ // the API call.
+ FieldsUsed map[string]interface{}
+}
+
+// NewContainerContext creates a new context for rendering containers
+func NewContainerContext() *ContainerContext {
+ containerCtx := ContainerContext{}
+ containerCtx.Header = formatter.SubHeaderContext{
+ "ID": formatter.ContainerIDHeader,
+ "Name": nameHeader,
+ "Project": projectHeader,
+ "Service": serviceHeader,
+ "Image": formatter.ImageHeader,
+ "Command": commandHeader,
+ "CreatedAt": formatter.CreatedAtHeader,
+ "RunningFor": runningForHeader,
+ "Ports": formatter.PortsHeader,
+ "State": formatter.StateHeader,
+ "Status": formatter.StatusHeader,
+ "Size": formatter.SizeHeader,
+ "Labels": formatter.LabelsHeader,
+ }
+ return &containerCtx
+}
+
+// MarshalJSON makes ContainerContext implement json.Marshaler
+func (c *ContainerContext) MarshalJSON() ([]byte, error) {
+ return formatter.MarshalJSON(c)
+}
+
+// ID returns the container's ID as a string. Depending on the `--no-trunc`
+// option being set, the full or truncated ID is returned.
+func (c *ContainerContext) ID() string {
+ if c.trunc {
+ return stringid.TruncateID(c.c.ID)
+ }
+ return c.c.ID
+}
+
+func (c *ContainerContext) Name() string {
+ return c.c.Name
+}
+
+// Names returns a comma-separated string of the container's names, with their
+// slash (/) prefix stripped. Additional names for the container (related to the
+// legacy `--link` feature) are omitted.
+func (c *ContainerContext) Names() string {
+ names := formatter.StripNamePrefix(c.c.Names)
+ if c.trunc {
+ for _, name := range names {
+ if len(strings.Split(name, "/")) == 1 {
+ names = []string{name}
+ break
+ }
+ }
+ }
+ return strings.Join(names, ",")
+}
+
+func (c *ContainerContext) Service() string {
+ return c.c.Service
+}
+
+func (c *ContainerContext) Project() string {
+ return c.c.Project
+}
+
+func (c *ContainerContext) Image() string {
+ return c.c.Image
+}
+
+func (c *ContainerContext) Command() string {
+ command := c.c.Command
+ if c.trunc {
+ command = formatter.Ellipsis(command, 20)
+ }
+ return strconv.Quote(command)
+}
+
+func (c *ContainerContext) CreatedAt() string {
+ return time.Unix(c.c.Created, 0).String()
+}
+
+func (c *ContainerContext) RunningFor() string {
+ createdAt := time.Unix(c.c.Created, 0)
+ return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
+}
+
+func (c *ContainerContext) ExitCode() int {
+ return c.c.ExitCode
+}
+
+func (c *ContainerContext) State() string {
+ return c.c.State
+}
+
+func (c *ContainerContext) Status() string {
+ return c.c.Status
+}
+
+func (c *ContainerContext) Health() string {
+ return c.c.Health
+}
+
+func (c *ContainerContext) Publishers() api.PortPublishers {
+ return c.c.Publishers
+}
+
+func (c *ContainerContext) Ports() string {
+ var ports []container.Port
+ for _, publisher := range c.c.Publishers {
+ ports = append(ports, container.Port{
+ IP: publisher.URL,
+ PrivatePort: uint16(publisher.TargetPort),
+ PublicPort: uint16(publisher.PublishedPort),
+ Type: publisher.Protocol,
+ })
+ }
+ return formatter.DisplayablePorts(ports)
+}
+
+// Labels returns a comma-separated string of labels present on the container.
+func (c *ContainerContext) Labels() string {
+ if c.c.Labels == nil {
+ return ""
+ }
+
+ var joinLabels []string
+ for k, v := range c.c.Labels {
+ joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
+ }
+ return strings.Join(joinLabels, ",")
+}
+
+// Label returns the value of the label with the given name or an empty string
+// if the given label does not exist.
+func (c *ContainerContext) Label(name string) string {
+ if c.c.Labels == nil {
+ return ""
+ }
+ return c.c.Labels[name]
+}
+
+// Mounts returns a comma-separated string of mount names present on the container.
+// If the trunc option is set, names can be truncated (ellipsized).
+func (c *ContainerContext) Mounts() string {
+ var mounts []string
+ for _, name := range c.c.Mounts {
+ if c.trunc {
+ name = formatter.Ellipsis(name, 15)
+ }
+ mounts = append(mounts, name)
+ }
+ return strings.Join(mounts, ",")
+}
+
+// LocalVolumes returns the number of volumes using the "local" volume driver.
+func (c *ContainerContext) LocalVolumes() string {
+ return fmt.Sprintf("%d", c.c.LocalVolumes)
+}
+
+// Networks returns a comma-separated string of networks that the container is
+// attached to.
+func (c *ContainerContext) Networks() string {
+ return strings.Join(c.c.Networks, ",")
+}
+
+// Size returns the container's size and virtual size (e.g. "2B (virtual 21.5MB)")
+func (c *ContainerContext) Size() string {
+ if c.FieldsUsed == nil {
+ c.FieldsUsed = map[string]interface{}{}
+ }
+ c.FieldsUsed["Size"] = struct{}{}
+ srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)
+ sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3)
+
+ sf := srw
+ if c.c.SizeRootFs > 0 {
+ sf = fmt.Sprintf("%s (virtual %s)", srw, sv)
+ }
+ return sf
+}
diff --git a/cmd/formatter/formatter.go b/cmd/formatter/formatter.go
index cf26a7885df..265eff8c5e8 100644
--- a/cmd/formatter/formatter.go
+++ b/cmd/formatter/formatter.go
@@ -22,15 +22,13 @@ import (
"reflect"
"strings"
- "github.com/docker/compose/v2/pkg/api"
-
- "github.com/pkg/errors"
+ "github.com/docker/compose/v5/pkg/api"
)
// Print prints formatted lists in different formats
func Print(toJSON interface{}, format string, outWriter io.Writer, writerFn func(w io.Writer), headers ...string) error {
switch strings.ToLower(format) {
- case PRETTY, "":
+ case TABLE, PRETTY, "":
return PrintPrettySection(outWriter, writerFn, headers...)
case TemplateLegacyJSON:
switch reflect.TypeOf(toJSON).Kind() {
@@ -67,7 +65,7 @@ func Print(toJSON interface{}, format string, outWriter io.Writer, writerFn func
_, _ = fmt.Fprintln(outWriter, outJSON)
}
default:
- return errors.Wrapf(api.ErrParsingFailed, "format value %q could not be parsed", format)
+ return fmt.Errorf("format value %q could not be parsed: %w", format, api.ErrParsingFailed)
}
return nil
}
diff --git a/cmd/formatter/formatter_test.go b/cmd/formatter/formatter_test.go
index be7f0bd2c5f..9da0dc85c6e 100644
--- a/cmd/formatter/formatter_test.go
+++ b/cmd/formatter/formatter_test.go
@@ -22,7 +22,8 @@ import (
"io"
"testing"
- "gotest.tools/assert"
+ "go.uber.org/goleak"
+ "gotest.tools/v3/assert"
)
type testStruct struct {
@@ -71,3 +72,7 @@ func TestPrint(t *testing.T) {
{"Name":"myName2","Status":"myStatus2"}
`)
}
+
+func TestColorsGoroutinesLeak(t *testing.T) {
+ goleak.VerifyNone(t)
+}
diff --git a/cmd/formatter/logs.go b/cmd/formatter/logs.go
index 5543ca86488..949ef8a8448 100644
--- a/cmd/formatter/logs.go
+++ b/cmd/formatter/logs.go
@@ -23,38 +23,69 @@ import (
"strconv"
"strings"
"sync"
+ "time"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/buger/goterm"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/pkg/jsonmessage"
)
+// LogConsumer consume logs from services and format them
+type logConsumer struct {
+ ctx context.Context
+ presenters sync.Map // map[string]*presenter
+ width int
+ stdout io.Writer
+ stderr io.Writer
+ color bool
+ prefix bool
+ timestamp bool
+}
+
// NewLogConsumer creates a new LogConsumer
-func NewLogConsumer(ctx context.Context, w io.Writer, color bool, prefix bool) api.LogConsumer {
+func NewLogConsumer(ctx context.Context, stdout, stderr io.Writer, color, prefix, timestamp bool) api.LogConsumer {
return &logConsumer{
ctx: ctx,
presenters: sync.Map{},
width: 0,
- writer: w,
+ stdout: stdout,
+ stderr: stderr,
color: color,
prefix: prefix,
+ timestamp: timestamp,
}
}
-func (l *logConsumer) Register(name string) {
- l.register(name)
-}
-
func (l *logConsumer) register(name string) *presenter {
- cf := monochrome
- if l.color {
- cf = nextColor()
- }
- p := &presenter{
- colors: cf,
- name: name,
+ var p *presenter
+ root, _, found := strings.Cut(name, " ")
+ if found {
+ parent := l.getPresenter(root)
+ p = &presenter{
+ colors: parent.colors,
+ name: name,
+ prefix: parent.prefix,
+ }
+ } else {
+ cf := monochrome
+ if l.color {
+ switch name {
+ case "":
+ cf = monochrome
+ case api.WatchLogger:
+ cf = makeColorFunc("92")
+ default:
+ cf = nextColor()
+ }
+ }
+ p = &presenter{
+ colors: cf,
+ name: name,
+ }
}
l.presenters.Store(name, p)
+ l.computeWidth()
if l.prefix {
- l.computeWidth()
l.presenters.Range(func(key, value interface{}) bool {
p := value.(*presenter)
p.setPrefix(l.width)
@@ -73,20 +104,34 @@ func (l *logConsumer) getPresenter(container string) *presenter {
}
// Log formats a log message as received from name/container
-func (l *logConsumer) Log(container, service, message string) {
+func (l *logConsumer) Log(container, message string) {
+ l.write(l.stdout, container, message)
+}
+
+// Err formats a log message as received from name/container
+func (l *logConsumer) Err(container, message string) {
+ l.write(l.stderr, container, message)
+}
+
+func (l *logConsumer) write(w io.Writer, container, message string) {
if l.ctx.Err() != nil {
return
}
p := l.getPresenter(container)
+ timestamp := time.Now().Format(jsonmessage.RFC3339NanoFixed)
for _, line := range strings.Split(message, "\n") {
- fmt.Fprintf(l.writer, "%s %s\n", p.prefix, line) // nolint:errcheck
+ if l.timestamp {
+ _, _ = fmt.Fprintf(w, "%s%s %s\n", p.prefix, timestamp, line)
+ } else {
+ _, _ = fmt.Fprintf(w, "%s%s\n", p.prefix, line)
+ }
}
}
func (l *logConsumer) Status(container, msg string) {
p := l.getPresenter(container)
- s := p.colors(fmt.Sprintf("%s %s\n", container, msg))
- l.writer.Write([]byte(s)) // nolint:errcheck
+ s := p.colors(fmt.Sprintf("%s%s %s\n", goterm.RESET_LINE, container, msg))
+ l.stdout.Write([]byte(s)) //nolint:errcheck
}
func (l *logConsumer) computeWidth() {
@@ -101,16 +146,6 @@ func (l *logConsumer) computeWidth() {
l.width = width + 1
}
-// LogConsumer consume logs from services and format them
-type logConsumer struct {
- ctx context.Context
- presenters sync.Map // map[string]*presenter
- width int
- writer io.Writer
- color bool
- prefix bool
-}
-
type presenter struct {
colors colorFunc
name string
@@ -118,5 +153,33 @@ type presenter struct {
}
func (p *presenter) setPrefix(width int) {
- p.prefix = p.colors(fmt.Sprintf("%-"+strconv.Itoa(width)+"s |", p.name))
+ if p.name == api.WatchLogger {
+ p.prefix = p.colors(strings.Repeat(" ", width) + " ⦿ ")
+ return
+ }
+ p.prefix = p.colors(fmt.Sprintf("%-"+strconv.Itoa(width)+"s | ", p.name))
+}
+
+type logDecorator struct {
+ decorated api.LogConsumer
+ Before func()
+ After func()
+}
+
+func (l logDecorator) Log(containerName, message string) {
+ l.Before()
+ l.decorated.Log(containerName, message)
+ l.After()
+}
+
+func (l logDecorator) Err(containerName, message string) {
+ l.Before()
+ l.decorated.Err(containerName, message)
+ l.After()
+}
+
+func (l logDecorator) Status(container, msg string) {
+ l.Before()
+ l.decorated.Status(container, msg)
+ l.After()
}
diff --git a/cmd/formatter/shortcut.go b/cmd/formatter/shortcut.go
new file mode 100644
index 00000000000..bb814cfa546
--- /dev/null
+++ b/cmd/formatter/shortcut.go
@@ -0,0 +1,364 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package formatter
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/buger/goterm"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/eiannone/keyboard"
+ "github.com/skratchdot/open-golang/open"
+)
+
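+// DISPLAY_ERROR_TIME is the number of seconds a keyboard-menu error message stays visible (see shouldDisplay).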
+const DISPLAY_ERROR_TIME = 10
+
+type KeyboardError struct {
+ err error
+ timeStart time.Time
+}
+
+func (ke *KeyboardError) shouldDisplay() bool {
+ return ke.err != nil && int(time.Since(ke.timeStart).Seconds()) < DISPLAY_ERROR_TIME
+}
+
+func (ke *KeyboardError) printError(height int, info string) {
+ if ke.shouldDisplay() {
+ errMessage := ke.err.Error()
+
+ moveCursor(height-1-extraLines(info)-extraLines(errMessage), 0)
+ clearLine()
+
+ fmt.Print(errMessage)
+ }
+}
+
+func (ke *KeyboardError) addError(prefix string, err error) {
+ ke.timeStart = time.Now()
+
+ prefix = ansiColor(CYAN, fmt.Sprintf("%s →", prefix), BOLD)
+ errorString := fmt.Sprintf("%s %s", prefix, err.Error())
+
+ ke.err = errors.New(errorString)
+}
+
+func (ke *KeyboardError) error() string {
+ return ke.err.Error()
+}
+
+type KeyboardWatch struct {
+ Watching bool
+ Watcher Feature
+}
+
+// Feature is a Compose feature that can be started/stopped by a menu command
+type Feature interface {
+ Start(context.Context) error
+ Stop() error
+}
+
+type KEYBOARD_LOG_LEVEL int
+
+const (
+ NONE KEYBOARD_LOG_LEVEL = 0
+ INFO KEYBOARD_LOG_LEVEL = 1
+ DEBUG KEYBOARD_LOG_LEVEL = 2
+)
+
+type LogKeyboard struct {
+ kError KeyboardError
+ Watch *KeyboardWatch
+ Detach func()
+ IsDockerDesktopActive bool
+ logLevel KEYBOARD_LOG_LEVEL
+ signalChannel chan<- os.Signal
+}
+
+func NewKeyboardManager(isDockerDesktopActive bool, sc chan<- os.Signal) *LogKeyboard {
+ return &LogKeyboard{
+ IsDockerDesktopActive: isDockerDesktopActive,
+ logLevel: INFO,
+ signalChannel: sc,
+ }
+}
+
+func (lk *LogKeyboard) Decorate(l api.LogConsumer) api.LogConsumer {
+ return logDecorator{
+ decorated: l,
+ Before: lk.clearNavigationMenu,
+ After: lk.PrintKeyboardInfo,
+ }
+}
+
+func (lk *LogKeyboard) PrintKeyboardInfo() {
+ if lk.logLevel == INFO {
+ lk.printNavigationMenu()
+ }
+}
+
+// createBuffer makes room to print the error and menu strings
+func (lk *LogKeyboard) createBuffer(lines int) {
+ if lk.kError.shouldDisplay() {
+ extraLines := extraLines(lk.kError.error()) + 1
+ lines += extraLines
+ }
+
+ // get the string
+ infoMessage := lk.navigationMenu()
+ // calculate how many lines are needed to display the menu info;
+ // an extra line break might be needed
+ extraLines := extraLines(infoMessage) + 1
+ lines += extraLines
+
+ if lines > 0 {
+ allocateSpace(lines)
+ moveCursorUp(lines)
+ }
+}
+
+func (lk *LogKeyboard) printNavigationMenu() {
+ offset := 1
+ lk.clearNavigationMenu()
+ lk.createBuffer(offset)
+
+ if lk.logLevel == INFO {
+ height := goterm.Height()
+ menu := lk.navigationMenu()
+
+ carriageReturn()
+ saveCursor()
+
+ lk.kError.printError(height, menu)
+
+ moveCursor(height-extraLines(menu), 0)
+ clearLine()
+ fmt.Print(menu)
+
+ carriageReturn()
+ restoreCursor()
+ }
+}
+
+func (lk *LogKeyboard) navigationMenu() string {
+ var items []string
+ if lk.IsDockerDesktopActive {
+ items = append(items, shortcutKeyColor("v")+navColor(" View in Docker Desktop"))
+ }
+
+ if lk.IsDockerDesktopActive {
+ items = append(items, shortcutKeyColor("o")+navColor(" View Config"))
+ }
+
+ isEnabled := " Enable"
+ if lk.Watch != nil && lk.Watch.Watching {
+ isEnabled = " Disable"
+ }
+ items = append(items, shortcutKeyColor("w")+navColor(isEnabled+" Watch"))
+ items = append(items, shortcutKeyColor("d")+navColor(" Detach"))
+
+ return strings.Join(items, " ")
+}
+
+func (lk *LogKeyboard) clearNavigationMenu() {
+ height := goterm.Height()
+ carriageReturn()
+ saveCursor()
+
+ // clearLine()
+ for i := 0; i < height; i++ {
+ moveCursorDown(1)
+ clearLine()
+ }
+ restoreCursor()
+}
+
+func (lk *LogKeyboard) openDockerDesktop(ctx context.Context, project *types.Project) {
+ if !lk.IsDockerDesktopActive {
+ return
+ }
+ go func() {
+ _ = tracing.EventWrapFuncForErrGroup(ctx, "menu/gui", tracing.SpanOptions{},
+ func(ctx context.Context) error {
+ link := fmt.Sprintf("docker-desktop://dashboard/apps/%s", project.Name)
+ err := open.Run(link)
+ if err != nil {
+ err = fmt.Errorf("could not open Docker Desktop")
+ lk.keyboardError("View", err)
+ }
+ return err
+ })()
+ }()
+}
+
+func (lk *LogKeyboard) openDDComposeUI(ctx context.Context, project *types.Project) {
+ if !lk.IsDockerDesktopActive {
+ return
+ }
+ go func() {
+ _ = tracing.EventWrapFuncForErrGroup(ctx, "menu/gui/composeview", tracing.SpanOptions{},
+ func(ctx context.Context) error {
+ link := fmt.Sprintf("docker-desktop://dashboard/docker-compose/%s", project.Name)
+ err := open.Run(link)
+ if err != nil {
+ err = fmt.Errorf("could not open Docker Desktop Compose UI")
+ lk.keyboardError("View Config", err)
+ }
+ return err
+ })()
+ }()
+}
+
+func (lk *LogKeyboard) openDDWatchDocs(ctx context.Context, project *types.Project) {
+ go func() {
+ _ = tracing.EventWrapFuncForErrGroup(ctx, "menu/gui/watch", tracing.SpanOptions{},
+ func(ctx context.Context) error {
+ link := fmt.Sprintf("docker-desktop://dashboard/docker-compose/%s/watch", project.Name)
+ err := open.Run(link)
+ if err != nil {
+ err = fmt.Errorf("could not open Docker Desktop Compose UI")
+ lk.keyboardError("Watch Docs", err)
+ }
+ return err
+ })()
+ }()
+}
+
+func (lk *LogKeyboard) keyboardError(prefix string, err error) {
+ lk.kError.addError(prefix, err)
+
+ lk.printNavigationMenu()
+ timer1 := time.NewTimer((DISPLAY_ERROR_TIME + 1) * time.Second)
+ go func() {
+ <-timer1.C
+ lk.printNavigationMenu()
+ }()
+}
+
+func (lk *LogKeyboard) ToggleWatch(ctx context.Context, options api.UpOptions) {
+ if lk.Watch == nil {
+ return
+ }
+ if lk.Watch.Watching {
+ err := lk.Watch.Watcher.Stop()
+ if err != nil {
+ options.Start.Attach.Err(api.WatchLogger, err.Error())
+ } else {
+ lk.Watch.Watching = false
+ }
+ } else {
+ go func() {
+ _ = tracing.EventWrapFuncForErrGroup(ctx, "menu/watch", tracing.SpanOptions{},
+ func(ctx context.Context) error {
+ err := lk.Watch.Watcher.Start(ctx)
+ if err != nil {
+ options.Start.Attach.Err(api.WatchLogger, err.Error())
+ } else {
+ lk.Watch.Watching = true
+ }
+ return err
+ })()
+ }()
+ }
+}
+
+func (lk *LogKeyboard) HandleKeyEvents(ctx context.Context, event keyboard.KeyEvent, project *types.Project, options api.UpOptions) {
+ switch kRune := event.Rune; kRune {
+ case 'd':
+ lk.clearNavigationMenu()
+ lk.Detach()
+ case 'v':
+ lk.openDockerDesktop(ctx, project)
+ case 'w':
+ if lk.Watch == nil {
+ // we try to open watch docs if DD is installed
+ if lk.IsDockerDesktopActive {
+ lk.openDDWatchDocs(ctx, project)
+ }
+ // either way we mark menu/watch as an error
+ go func() {
+ _ = tracing.EventWrapFuncForErrGroup(ctx, "menu/watch", tracing.SpanOptions{},
+ func(ctx context.Context) error {
+ err := fmt.Errorf("watch is not yet configured. Learn more: %s", ansiColor(CYAN, "https://docs.docker.com/compose/file-watch/"))
+ lk.keyboardError("Watch", err)
+ return err
+ })()
+ }()
+ }
+ lk.ToggleWatch(ctx, options)
+ case 'o':
+ lk.openDDComposeUI(ctx, project)
+ }
+ switch key := event.Key; key {
+ case keyboard.KeyCtrlC:
+ _ = keyboard.Close()
+ lk.clearNavigationMenu()
+ showCursor()
+
+ lk.logLevel = NONE
+ // will notify main thread to kill and will handle gracefully
+ lk.signalChannel <- syscall.SIGINT
+ case keyboard.KeyCtrlZ:
+ handleCtrlZ()
+ case keyboard.KeyEnter:
+ newLine()
+ lk.printNavigationMenu()
+ }
+}
+
+func (lk *LogKeyboard) EnableWatch(enabled bool, watcher Feature) {
+ lk.Watch = &KeyboardWatch{
+ Watching: enabled,
+ Watcher: watcher,
+ }
+}
+
+func (lk *LogKeyboard) EnableDetach(detach func()) {
+ lk.Detach = detach
+}
+
+func allocateSpace(lines int) {
+ for i := 0; i < lines; i++ {
+ clearLine()
+ newLine()
+ carriageReturn()
+ }
+}
+
+func extraLines(s string) int {
+ return int(math.Floor(float64(lenAnsi(s)) / float64(goterm.Width())))
+}
+
+func shortcutKeyColor(key string) string {
+ foreground := "38;2"
+ black := "0;0;0"
+ background := "48;2"
+ white := "255;255;255"
+ return ansiColor(foreground+";"+black+";"+background+";"+white, key, BOLD)
+}
+
+func navColor(key string) string {
+ return ansiColor(FAINT, key)
+}
diff --git a/cmd/formatter/shortcut_unix.go b/cmd/formatter/shortcut_unix.go
new file mode 100644
index 00000000000..0baa3a949cc
--- /dev/null
+++ b/cmd/formatter/shortcut_unix.go
@@ -0,0 +1,25 @@
+//go:build !windows
+
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package formatter
+
+import "syscall"
+
+func handleCtrlZ() {
+ _ = syscall.Kill(0, syscall.SIGSTOP)
+}
diff --git a/cmd/formatter/shortcut_windows.go b/cmd/formatter/shortcut_windows.go
new file mode 100644
index 00000000000..1efa14cc2dd
--- /dev/null
+++ b/cmd/formatter/shortcut_windows.go
@@ -0,0 +1,25 @@
+//go:build windows
+
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package formatter
+
+// handleCtrlZ is a no-op on Windows as SIGSTOP is not supported
+func handleCtrlZ() {
+ // Windows doesn't support SIGSTOP/SIGCONT signals
+ // Ctrl+Z behavior is handled differently by the Windows terminal
+}
diff --git a/cmd/main.go b/cmd/main.go
index 2325d975e1e..c5afdb5338e 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -20,46 +20,61 @@ import (
"os"
dockercli "github.com/docker/cli/cli"
- "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/cli/cli-plugins/metadata"
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/cmd/cmdtrace"
+ "github.com/docker/compose/v5/cmd/prompt"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
- "github.com/docker/compose/v2/cmd/compatibility"
- commands "github.com/docker/compose/v2/cmd/compose"
- "github.com/docker/compose/v2/internal"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/compose"
+ "github.com/docker/compose/v5/cmd/compatibility"
+ commands "github.com/docker/compose/v5/cmd/compose"
+ "github.com/docker/compose/v5/internal"
+ "github.com/docker/compose/v5/pkg/compose"
)
func pluginMain() {
- plugin.Run(func(dockerCli command.Cli) *cobra.Command {
- lazyInit := api.NewServiceProxy()
- cmd := commands.RootCommand(dockerCli, lazyInit)
- originalPreRun := cmd.PersistentPreRunE
- cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
- if err := plugin.PersistentPreRunE(cmd, args); err != nil {
- return err
+ plugin.Run(
+ func(cli command.Cli) *cobra.Command {
+ backendOptions := &commands.BackendOptions{
+ Options: []compose.Option{
+ compose.WithPrompt(prompt.NewPrompt(cli.In(), cli.Out()).Confirm),
+ },
}
- lazyInit.WithService(compose.NewComposeService(dockerCli))
- if originalPreRun != nil {
- return originalPreRun(cmd, args)
- }
- return nil
- }
- cmd.SetFlagErrorFunc(func(c *cobra.Command, err error) error {
- return dockercli.StatusError{
- StatusCode: compose.CommandSyntaxFailure.ExitCode,
- Status: err.Error(),
+
+ cmd := commands.RootCommand(cli, backendOptions)
+ originalPreRunE := cmd.PersistentPreRunE
+ cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
+ // initialize the cli instance
+ if err := plugin.PersistentPreRunE(cmd, args); err != nil {
+ return err
+ }
+ if err := cmdtrace.Setup(cmd, cli, os.Args[1:]); err != nil {
+ logrus.Debugf("failed to enable tracing: %v", err)
+ }
+
+ if originalPreRunE != nil {
+ return originalPreRunE(cmd, args)
+ }
+ return nil
}
- })
- return cmd
- },
- manager.Metadata{
+
+ cmd.SetFlagErrorFunc(func(c *cobra.Command, err error) error {
+ return dockercli.StatusError{
+ StatusCode: 1,
+ Status: err.Error(),
+ }
+ })
+ return cmd
+ },
+ metadata.Metadata{
SchemaVersion: "0.1.0",
Vendor: "Docker Inc.",
Version: internal.Version,
- })
+ },
+ command.WithUserAgent("compose/"+internal.Version),
+ )
}
func main() {
diff --git a/cmd/prompt/prompt.go b/cmd/prompt/prompt.go
new file mode 100644
index 00000000000..722ba64b0b1
--- /dev/null
+++ b/cmd/prompt/prompt.go
@@ -0,0 +1,101 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package prompt
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/AlecAivazis/survey/v2"
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/compose/v5/pkg/utils"
+)
+
+//go:generate mockgen -destination=./prompt_mock.go -self_package "github.com/docker/compose/v5/pkg/prompt" -package=prompt . UI
+
+// UI - prompt user input
+type UI interface {
+ Confirm(message string, defaultValue bool) (bool, error)
+}
+
+func NewPrompt(stdin *streams.In, stdout *streams.Out) UI {
+ if stdin.IsTerminal() {
+ return User{stdin: streamsFileReader{stdin}, stdout: streamsFileWriter{stdout}}
+ }
+ return Pipe{stdin: stdin, stdout: stdout}
+}
+
+// User - in a terminal
+type User struct {
+ stdout streamsFileWriter
+ stdin streamsFileReader
+}
+
+// adapt streams.Out to terminal.FileWriter
+type streamsFileWriter struct {
+ stream *streams.Out
+}
+
+func (s streamsFileWriter) Write(p []byte) (n int, err error) {
+ return s.stream.Write(p)
+}
+
+func (s streamsFileWriter) Fd() uintptr {
+ return s.stream.FD()
+}
+
+// adapt streams.In to terminal.FileReader
+type streamsFileReader struct {
+ stream *streams.In
+}
+
+func (s streamsFileReader) Read(p []byte) (n int, err error) {
+ return s.stream.Read(p)
+}
+
+func (s streamsFileReader) Fd() uintptr {
+ return s.stream.FD()
+}
+
+// Confirm asks for yes or no input
+func (u User) Confirm(message string, defaultValue bool) (bool, error) {
+ qs := &survey.Confirm{
+ Message: message,
+ Default: defaultValue,
+ }
+ var b bool
+ err := survey.AskOne(qs, &b, func(options *survey.AskOptions) error {
+ options.Stdio.In = u.stdin
+ options.Stdio.Out = u.stdout
+ return nil
+ })
+ return b, err
+}
+
+// Pipe - aggregates prompt methods
+type Pipe struct {
+ stdout io.Writer
+ stdin io.Reader
+}
+
+// Confirm asks for yes or no input
+func (u Pipe) Confirm(message string, defaultValue bool) (bool, error) {
+ _, _ = fmt.Fprint(u.stdout, message)
+ var answer string
+ _, _ = fmt.Fscanln(u.stdin, &answer)
+ return utils.StringToBool(answer), nil
+}
diff --git a/pkg/prompt/prompt_mock.go b/cmd/prompt/prompt_mock.go
similarity index 95%
rename from pkg/prompt/prompt_mock.go
rename to cmd/prompt/prompt_mock.go
index 9acee1ddba6..83b0ff1189b 100644
--- a/pkg/prompt/prompt_mock.go
+++ b/cmd/prompt/prompt_mock.go
@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
-// Container: github.com/docker/compose-cli/pkg/prompt (interfaces: UI)
+// Source: github.com/docker/compose-cli/pkg/prompt (interfaces: UI)
// Package prompt is a generated GoMock package.
package prompt
@@ -7,7 +7,7 @@ package prompt
import (
reflect "reflect"
- gomock "github.com/golang/mock/gomock"
+ gomock "go.uber.org/mock/gomock"
)
// MockUI is a mock of UI interface
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 00000000000..a66912f32e0
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,21 @@
+coverage:
+ status:
+ project:
+ default:
+ informational: true
+ target: auto
+ threshold: 2%
+ patch:
+ default:
+ informational: true
+
+comment:
+ require_changes: true
+
+ignore:
+ - "packaging"
+ - "docs"
+ - "bin"
+ - "e2e"
+ - "pkg/e2e"
+ - "**/*_test.go"
diff --git a/docker-bake.hcl b/docker-bake.hcl
new file mode 100644
index 00000000000..5c6522d3a81
--- /dev/null
+++ b/docker-bake.hcl
@@ -0,0 +1,148 @@
+// Copyright 2022 Docker Compose CLI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+variable "GO_VERSION" {
+ # default ARG value set in Dockerfile
+ default = null
+}
+
+variable "BUILD_TAGS" {
+ default = "e2e"
+}
+
+variable "DOCS_FORMATS" {
+ default = "md,yaml"
+}
+
+# Defines the output folder to override the default behavior.
+# See the Makefile for details; this is generally only useful for
+# the packaging scripts, and care should be taken not to break
+# them.
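+# Example (assumed invocation): DESTDIR=./custom-bin docker buildx bake binary
+# An environment variable with the same name overrides the default value below.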
+variable "DESTDIR" {
+ default = ""
+}
+function "outdir" {
+ params = [defaultdir]
+ result = DESTDIR != "" ? DESTDIR : "${defaultdir}"
+}
+
+# Special target: https://github.com/docker/metadata-action#bake-definition
+target "meta-helper" {}
+
+target "_common" {
+ args = {
+ GO_VERSION = GO_VERSION
+ BUILD_TAGS = BUILD_TAGS
+ BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
+ }
+}
+
+group "default" {
+ targets = ["binary"]
+}
+
+group "validate" {
+ targets = ["lint", "vendor-validate", "license-validate"]
+}
+
+target "lint" {
+ inherits = ["_common"]
+ target = "lint"
+ output = ["type=cacheonly"]
+}
+
+target "license-validate" {
+ target = "license-validate"
+ output = ["type=cacheonly"]
+}
+
+target "license-update" {
+ target = "license-update"
+ output = ["."]
+}
+
+target "vendor-validate" {
+ inherits = ["_common"]
+ target = "vendor-validate"
+ output = ["type=cacheonly"]
+}
+
+target "vendor-update" {
+ inherits = ["_common"]
+ target = "vendor-update"
+ output = ["."]
+}
+
+target "test" {
+ inherits = ["_common"]
+ target = "test-coverage"
+ output = [outdir("./bin/coverage/unit")]
+}
+
+target "binary-with-coverage" {
+ inherits = ["_common"]
+ target = "binary"
+ args = {
+ BUILD_FLAGS = "-cover -covermode=atomic"
+ }
+ output = [outdir("./bin/build")]
+ platforms = ["local"]
+}
+
+target "binary" {
+ inherits = ["_common"]
+ target = "binary"
+ output = [outdir("./bin/build")]
+ platforms = ["local"]
+}
+
+target "binary-cross" {
+ inherits = ["binary"]
+ platforms = [
+ "darwin/amd64",
+ "darwin/arm64",
+ "linux/amd64",
+ "linux/arm/v6",
+ "linux/arm/v7",
+ "linux/arm64",
+ "linux/ppc64le",
+ "linux/riscv64",
+ "linux/s390x",
+ "windows/amd64",
+ "windows/arm64"
+ ]
+}
+
+target "release" {
+ inherits = ["binary-cross"]
+ target = "release"
+ output = [outdir("./bin/release")]
+}
+
+target "docs-validate" {
+ inherits = ["_common"]
+ target = "docs-validate"
+ output = ["type=cacheonly"]
+}
+
+target "docs-update" {
+ inherits = ["_common"]
+ target = "docs-update"
+ output = ["./docs"]
+}
+
+target "image-cross" {
+ inherits = ["meta-helper", "binary-cross"]
+ output = ["type=image"]
+}
diff --git a/docs/docs.Dockerfile b/docs/docs.Dockerfile
deleted file mode 100644
index 3eb163b5976..00000000000
--- a/docs/docs.Dockerfile
+++ /dev/null
@@ -1,57 +0,0 @@
-# syntax=docker/dockerfile:1.3-labs
-
-
-# Copyright 2020 Docker Compose CLI authors
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ARG GO_VERSION=1.17
-ARG FORMATS=md,yaml
-
-FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION}-alpine AS docsgen
-WORKDIR /src
-RUN --mount=target=. \
- --mount=target=/root/.cache,type=cache \
- go build -o /out/docsgen ./docs/yaml/main/generate.go
-
-FROM --platform=${BUILDPLATFORM} alpine AS gen
-RUN apk add --no-cache rsync git
-WORKDIR /src
-COPY --from=docsgen /out/docsgen /usr/bin
-ARG FORMATS
-RUN --mount=target=/context \
-    --mount=target=.,type=tmpfs <<EOT
-if [ -n "$(git status --porcelain -- docs/reference)" ]; then
-    echo >&2 'ERROR: Docs result differs. Please update with "make docs"'
- git status --porcelain -- docs/reference
- exit 1
-fi
-EOT
diff --git a/docs/examples/provider.go b/docs/examples/provider.go
new file mode 100644
index 00000000000..79fd3256eed
--- /dev/null
+++ b/docs/examples/provider.go
@@ -0,0 +1,152 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Short: "Compose Provider Example",
+ Use: "demo",
+ }
+ cmd.AddCommand(composeCommand())
+ err := cmd.Execute()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+type options struct {
+ db string
+ size int
+}
+
+func composeCommand() *cobra.Command {
+ c := &cobra.Command{
+ Use: "compose EVENT",
+ TraverseChildren: true,
+ }
+ c.PersistentFlags().String("project-name", "", "compose project name") // unused
+
+ var options options
+ upCmd := &cobra.Command{
+ Use: "up",
+ Run: func(_ *cobra.Command, args []string) {
+ up(options, args)
+ },
+ Args: cobra.ExactArgs(1),
+ }
+
+ upCmd.Flags().StringVar(&options.db, "type", "", "Database type (mysql, postgres, etc.)")
+ _ = upCmd.MarkFlagRequired("type")
+ upCmd.Flags().IntVar(&options.size, "size", 10, "Database size in GB")
+ upCmd.Flags().String("name", "", "Name of the database to be created")
+ _ = upCmd.MarkFlagRequired("name")
+
+ downCmd := &cobra.Command{
+ Use: "down",
+ Run: down,
+ Args: cobra.ExactArgs(1),
+ }
+ downCmd.Flags().String("name", "", "Name of the database to be deleted")
+ _ = downCmd.MarkFlagRequired("name")
+
+ c.AddCommand(upCmd, downCmd)
+ c.AddCommand(metadataCommand(upCmd, downCmd))
+ return c
+}
+
+const lineSeparator = "\n"
+
+func up(options options, args []string) {
+ servicename := args[0]
+ fmt.Printf(`{ "type": "debug", "message": "Starting %s" }%s`, servicename, lineSeparator)
+
+ for i := 0; i < options.size; i += 10 {
+ time.Sleep(1 * time.Second)
+ fmt.Printf(`{ "type": "info", "message": "Processing ... %d%%" }%s`, i*100/options.size, lineSeparator)
+ }
+ fmt.Printf(`{ "type": "setenv", "message": "URL=https://magic.cloud/%s" }%s`, servicename, lineSeparator)
+}
+
+func down(_ *cobra.Command, _ []string) {
+ fmt.Printf(`{ "type": "error", "message": "Permission error" }%s`, lineSeparator)
+}
+
+func metadataCommand(upCmd, downCmd *cobra.Command) *cobra.Command {
+ return &cobra.Command{
+ Use: "metadata",
+ Run: func(cmd *cobra.Command, _ []string) {
+ metadata(upCmd, downCmd)
+ },
+ Args: cobra.NoArgs,
+ }
+}
+
+func metadata(upCmd, downCmd *cobra.Command) {
+ metadata := ProviderMetadata{}
+ metadata.Description = "Manage services on AwesomeCloud"
+ metadata.Up = commandParameters(upCmd)
+ metadata.Down = commandParameters(downCmd)
+ jsonMetadata, err := json.Marshal(metadata)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(string(jsonMetadata))
+}
+
+func commandParameters(cmd *cobra.Command) CommandMetadata {
+ cmdMetadata := CommandMetadata{}
+ cmd.Flags().VisitAll(func(f *pflag.Flag) {
+ _, isRequired := f.Annotations[cobra.BashCompOneRequiredFlag]
+ cmdMetadata.Parameters = append(cmdMetadata.Parameters, Metadata{
+ Name: f.Name,
+ Description: f.Usage,
+ Required: isRequired,
+ Type: f.Value.Type(),
+ Default: f.DefValue,
+ })
+ })
+ return cmdMetadata
+}
+
+type ProviderMetadata struct {
+ Description string `json:"description"`
+ Up CommandMetadata `json:"up"`
+ Down CommandMetadata `json:"down"`
+}
+
+type CommandMetadata struct {
+ Parameters []Metadata `json:"parameters"`
+}
+
+type Metadata struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Required bool `json:"required"`
+ Type string `json:"type"`
+ Default string `json:"default,omitempty"`
+}
diff --git a/docs/extension.md b/docs/extension.md
new file mode 100644
index 00000000000..8991b63ca80
--- /dev/null
+++ b/docs/extension.md
@@ -0,0 +1,176 @@
+# About
+
+The Compose application model defines `service` as an abstraction for a computing unit managing (a subset of)
+application needs, which can interact with other services over networks. Docker Compose is designed
+to use the Docker Engine ("Moby") API to manage services as containers, but the abstraction _could_ also cover
+many other runtimes, typically cloud services or services natively provided by the host.
+
+The Compose extensibility model has been designed to extend the `service` support to runtimes accessible through
+third-party tooling.
+
+# Architecture
+
+Compose extensibility relies on the `provider` attribute to select the actual binary responsible for managing
+the resource(s) needed to run a service.
+
+```yaml
+ database:
+ provider:
+ type: awesomecloud
+ options:
+ type: mysql
+ size: 256
+ name: myAwesomeCloudDB
+```
+
+`provider.type` tells Compose which binary to run, which can be either:
+- Another Docker CLI plugin (typically, `model` to run `docker-model`)
+- An executable in the user's `PATH`
+
+If `provider.type` doesn't resolve to either of those, Compose reports an error and interrupts the `up` command.
+
+To be a valid Compose extension, the provider command *MUST* accept a `compose` command (which can be hidden)
+with `up` and `down` subcommands.
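+
+A minimal sketch of that command surface, assuming [cobra](https://github.com/spf13/cobra) as the CLI framework
+(the bundled [example](examples/provider.go) follows the same shape; the `awesomecloud` name and its flags are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	root := &cobra.Command{Use: "awesomecloud"}
+
+	// the `compose` command may be hidden from regular CLI users
+	compose := &cobra.Command{Use: "compose", Hidden: true}
+	compose.PersistentFlags().String("project-name", "", "compose project name")
+
+	up := &cobra.Command{
+		Use:  "up SERVICE",
+		Args: cobra.ExactArgs(1),
+		Run: func(_ *cobra.Command, args []string) {
+			// provision the resource backing args[0] here
+			fmt.Println(`{ "type": "info", "message": "provisioning ..." }`)
+		},
+	}
+	down := &cobra.Command{
+		Use:  "down SERVICE",
+		Args: cobra.ExactArgs(1),
+		Run:  func(_ *cobra.Command, _ []string) { /* release resources here */ },
+	}
+
+	compose.AddCommand(up, down)
+	root.AddCommand(compose)
+	if err := root.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```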
+
+## Up lifecycle
+
+To execute an application's `up` lifecycle, Compose executes the provider's `compose up` command, passing
+the project name, service name, and additional options. The `provider.options` are translated
+into command line flags. For example:
+```console
+awesomecloud compose --project-name <NAME> up --type=mysql --size=256 "database"
+```
+
+> __Note:__ `project-name` _should_ be used by the provider to tag the resources
+> created for a project, so that a later run of the `down` subcommand releases
+> all resources allocated for that project.
+
+## Communication with Compose
+
+Providers can interact with Compose using `stdout` as a channel, sending line-delimited JSON messages.
+JSON messages MUST include a `type` and a `message` attribute.
+```json
+{ "type": "info", "message": "preparing mysql ..." }
+```
+
+`type` can be either:
+- `info`: Reports status updates to the user. Compose renders the message as the service state in the progress UI.
+- `error`: Lets the user know something went wrong, with details about the error. Compose renders the message as the reason for the service failure.
+- `setenv`: Lets the provider tell Compose how dependent services can access the created resource. See the next section for further details.
+- `debug`: These messages can help with debugging the provider, but are not rendered to the user by default. They are rendered when Compose runs with the `--verbose` flag.
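+
+A provider can produce this framing by writing one JSON object per line to `stdout`; a minimal sketch
+(the `emit` helper is illustrative, not part of any Compose API):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type message struct {
+	Type    string `json:"type"`
+	Message string `json:"message"`
+}
+
+// emit writes a single line-delimited JSON message to stdout.
+func emit(msgType, text string) {
+	b, _ := json.Marshal(message{Type: msgType, Message: text})
+	fmt.Println(string(b))
+}
+
+func main() {
+	emit("info", "preparing mysql ...")
+	emit("setenv", "URL=https://awesomecloud.com/db:1234")
+}
+```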
+
+```mermaid
+sequenceDiagram
+ Shell->>Compose: docker compose up
+ Compose->>Provider: compose up --project-name=xx --foo=bar "database"
+ Provider--)Compose: json { "info": "pulling 25%" }
+ Compose-)Shell: pulling 25%
+ Provider--)Compose: json { "info": "pulling 50%" }
+ Compose-)Shell: pulling 50%
+ Provider--)Compose: json { "info": "pulling 75%" }
+ Compose-)Shell: pulling 75%
+ Provider--)Compose: json { "setenv": "URL=http://cloud.com/abcd:1234" }
+ Compose-)Compose: set DATABASE_URL
+ Provider-)Compose: EOF (command complete) exit 0
+ Compose-)Shell: service started
+```
+
+## Connection to a service managed by a provider
+
+A service in the Compose application can declare dependency on a service managed by an external provider:
+
+```yaml
+services:
+ app:
+ image: myapp
+ depends_on:
+ - database
+
+ database:
+ provider:
+ type: awesomecloud
+```
+
+When the provider command sends a `setenv` JSON message, Compose injects the specified variable into any dependent service,
+automatically prefixing it with the service name. For example, if `awesomecloud compose up` returns:
+```json
+{"type": "setenv", "message": "URL=https://awesomecloud.com/db:1234"}
+```
+Then the `app` service, which depends on the service managed by the provider, will receive a `DATABASE_URL` environment variable injected
+into its runtime environment.
+
+> __Note:__ The `compose up` provider command _MUST_ be idempotent. If the resource is already running, the command _MUST_ set
+> the same environment variables to ensure consistent configuration of dependent services.
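+
+The injected variable name combines the declaring service name with the key sent by the provider; a sketch of that
+convention, inferred from the `database` / `DATABASE_URL` example above (the helper name is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// envVarName namespaces a setenv key with the upper-cased name of the
+// service declaring the provider.
+func envVarName(serviceName, key string) string {
+	return strings.ToUpper(serviceName) + "_" + key
+}
+
+func main() {
+	fmt.Println(envVarName("database", "URL")) // prints DATABASE_URL
+}
+```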
+
+## Down lifecycle
+
+The `down` lifecycle is equivalent to `up`, using the `compose --project-name <NAME> down` command.
+The provider is responsible for releasing all resources associated with the service.
+
+## Provide metadata about options
+
+Compose extensions *MAY* optionally implement a `metadata` subcommand to provide information about the parameters accepted by the `up` and `down` commands.
+
+The `metadata` subcommand takes no parameters and returns a JSON structure on the `stdout` channel that describes the parameters accepted by both the `up` and `down` commands, including whether each parameter is mandatory or optional.
+
+```console
+awesomecloud compose metadata
+```
+
+The expected JSON output format is:
+```json
+{
+ "description": "Manage services on AwesomeCloud",
+ "up": {
+ "parameters": [
+ {
+ "name": "type",
+ "description": "Database type (mysql, postgres, etc.)",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "name": "size",
+ "description": "Database size in GB",
+ "required": false,
+ "type": "integer",
+ "default": "10"
+ },
+ {
+ "name": "name",
+ "description": "Name of the database to be created",
+ "required": true,
+ "type": "string"
+ }
+ ]
+ },
+ "down": {
+ "parameters": [
+ {
+ "name": "name",
+ "description": "Name of the database to be removed",
+ "required": true,
+ "type": "string"
+ }
+ ]
+ }
+}
+```
+The top elements are:
+- `description`: Human-readable description of the provider
+- `up`: Object describing the parameters accepted by the `up` command
+- `down`: Object describing the parameters accepted by the `down` command
+
+And for each command parameter, you should include the following properties:
+- `name`: The parameter name (without `--` prefix)
+- `description`: Human-readable description of the parameter
+- `required`: Boolean indicating if the parameter is mandatory
+- `type`: Parameter type (`string`, `integer`, `boolean`, etc.)
+- `default`: Default value (optional, only for non-required parameters)
+- `enum`: List of possible values supported by the parameter separated by `,` (optional, only for parameters with a limited set of values)
+
+This metadata allows Compose and other tools to understand the provider's interface and provide a better user experience, such as validation, auto-completion, and documentation generation.
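+
+A tool consuming this output only needs to run the provider and decode the JSON; a minimal sketch, assuming an
+`awesomecloud` binary on `PATH` and mirroring the metadata structs from the bundled example:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os/exec"
+)
+
+type parameter struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	Required    bool   `json:"required"`
+	Type        string `json:"type"`
+	Default     string `json:"default,omitempty"`
+}
+
+type commandMetadata struct {
+	Parameters []parameter `json:"parameters"`
+}
+
+type providerMetadata struct {
+	Description string          `json:"description"`
+	Up          commandMetadata `json:"up"`
+	Down        commandMetadata `json:"down"`
+}
+
+func main() {
+	out, err := exec.Command("awesomecloud", "compose", "metadata").Output()
+	if err != nil {
+		panic(err)
+	}
+	var md providerMetadata
+	if err := json.Unmarshal(out, &md); err != nil {
+		panic(err)
+	}
+	for _, p := range md.Up.Parameters {
+		fmt.Printf("--%s (%s) required=%t default=%q\n", p.Name, p.Type, p.Required, p.Default)
+	}
+}
+```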
+
+## Examples
+
+See the [example](examples/provider.go) for an illustration of implementing this API in a command-line tool.
diff --git a/docs/reference/compose.md b/docs/reference/compose.md
index e5c27fa7561..d80bb86ec62 100644
--- a/docs/reference/compose.md
+++ b/docs/reference/compose.md
@@ -1,62 +1,77 @@
+
# docker compose
+```text
+docker compose [-f <arg>...] [options] [COMMAND] [ARGS...]
+```
+
-Docker Compose
+Define and run multi-container applications with Docker
### Subcommands
-| Name | Description |
-| --- | --- |
-| [`build`](compose_build.md) | Build or rebuild services |
-| [`convert`](compose_convert.md) | Converts the compose file to platform's canonical format |
-| [`cp`](compose_cp.md) | Copy files/folders between a service container and the local filesystem |
-| [`create`](compose_create.md) | Creates containers for a service. |
-| [`down`](compose_down.md) | Stop and remove containers, networks |
-| [`events`](compose_events.md) | Receive real time events from containers. |
-| [`exec`](compose_exec.md) | Execute a command in a running container. |
-| [`images`](compose_images.md) | List images used by the created containers |
-| [`kill`](compose_kill.md) | Force stop service containers. |
-| [`logs`](compose_logs.md) | View output from containers |
-| [`ls`](compose_ls.md) | List running compose projects |
-| [`pause`](compose_pause.md) | Pause services |
-| [`port`](compose_port.md) | Print the public port for a port binding. |
-| [`ps`](compose_ps.md) | List containers |
-| [`pull`](compose_pull.md) | Pull service images |
-| [`push`](compose_push.md) | Push service images |
-| [`restart`](compose_restart.md) | Restart containers |
-| [`rm`](compose_rm.md) | Removes stopped service containers |
-| [`run`](compose_run.md) | Run a one-off command on a service. |
-| [`start`](compose_start.md) | Start services |
-| [`stop`](compose_stop.md) | Stop services |
-| [`top`](compose_top.md) | Display the running processes |
-| [`unpause`](compose_unpause.md) | Unpause services |
-| [`up`](compose_up.md) | Create and start containers |
-| [`version`](compose_version.md) | Show the Docker Compose version information |
+| Name | Description |
+|:--------------------------------|:----------------------------------------------------------------------------------------|
+| [`attach`](compose_attach.md) | Attach local standard input, output, and error streams to a service's running container |
+| [`bridge`](compose_bridge.md) | Convert compose files into another model |
+| [`build`](compose_build.md) | Build or rebuild services |
+| [`commit`](compose_commit.md) | Create a new image from a service container's changes |
+| [`config`](compose_config.md) | Parse, resolve and render compose file in canonical format |
+| [`cp`](compose_cp.md) | Copy files/folders between a service container and the local filesystem |
+| [`create`](compose_create.md) | Creates containers for a service |
+| [`down`](compose_down.md) | Stop and remove containers, networks |
+| [`events`](compose_events.md) | Receive real time events from containers |
+| [`exec`](compose_exec.md) | Execute a command in a running container |
+| [`export`](compose_export.md) | Export a service container's filesystem as a tar archive |
+| [`images`](compose_images.md) | List images used by the created containers |
+| [`kill`](compose_kill.md) | Force stop service containers |
+| [`logs`](compose_logs.md) | View output from containers |
+| [`ls`](compose_ls.md) | List running compose projects |
+| [`pause`](compose_pause.md) | Pause services |
+| [`port`](compose_port.md) | Print the public port for a port binding |
+| [`ps`](compose_ps.md) | List containers |
+| [`publish`](compose_publish.md) | Publish compose application |
+| [`pull`](compose_pull.md) | Pull service images |
+| [`push`](compose_push.md) | Push service images |
+| [`restart`](compose_restart.md) | Restart service containers |
+| [`rm`](compose_rm.md) | Removes stopped service containers |
+| [`run`](compose_run.md) | Run a one-off command on a service |
+| [`scale`](compose_scale.md) | Scale services |
+| [`start`](compose_start.md) | Start services |
+| [`stats`](compose_stats.md) | Display a live stream of container(s) resource usage statistics |
+| [`stop`](compose_stop.md) | Stop services |
+| [`top`](compose_top.md) | Display the running processes |
+| [`unpause`](compose_unpause.md) | Unpause services |
+| [`up`](compose_up.md) | Create and start containers |
+| [`version`](compose_version.md) | Show the Docker Compose version information |
+| [`volumes`](compose_volumes.md) | List volumes |
+| [`wait`](compose_wait.md) | Block until containers of all (or specified) services stop. |
+| [`watch`](compose_watch.md) | Watch build context for service and rebuild/refresh containers when files are updated |
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--ansi` | `string` | `auto` | Control when to print ANSI control characters ("never"\|"always"\|"auto") |
-| `--compatibility` | | | Run compose in backward compatibility mode |
-| `--env-file` | `string` | | Specify an alternate environment file. |
-| `-f`, `--file` | `stringArray` | | Compose configuration files |
-| `--profile` | `stringArray` | | Specify a profile to enable |
-| `--project-directory` | `string` | | Specify an alternate working directory (default: the path of the Compose file) |
-| `-p`, `--project-name` | `string` | | Project name |
+| Name | Type | Default | Description |
+|:-----------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
+| `--all-resources` | `bool` | | Include all resources, even those not used by services |
+| `--ansi` | `string` | `auto` | Control when to print ANSI control characters ("never"\|"always"\|"auto") |
+| `--compatibility` | `bool` | | Run compose in backward compatibility mode |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--env-file` | `stringArray` | | Specify an alternate environment file |
+| `-f`, `--file` | `stringArray` | | Compose configuration files |
+| `--parallel` | `int` | `-1` | Control max parallelism, -1 for unlimited |
+| `--profile` | `stringArray` | | Specify a profile to enable |
+| `--progress` | `string` | | Set type of progress output (auto, tty, plain, json, quiet) |
+| `--project-directory` | `string` | | Specify an alternate working directory (default: the path of the first specified Compose file) |
+| `-p`, `--project-name` | `string` | | Project name |
-## Description
+## Examples
-You can use compose subcommand, `docker compose [-f ...] [options] [COMMAND] [ARGS...]`, to build and manage
-multiple services in Docker containers.
-
-### Use `-f` to specify name and path of one or more Compose files
-Use the `-f` flag to specify the location of a Compose configuration file.
+### Use `-f` to specify the name and path of one or more Compose files
+Use the `-f` flag to specify the location of a Compose [configuration file](/reference/compose-file/).
#### Specifying multiple Compose files
You can supply multiple `-f` configuration files. When you supply multiple files, Compose combines them into a single
@@ -66,10 +81,10 @@ to their predecessors.
For example, consider this command line:
```console
-$ docker compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
+$ docker compose -f compose.yaml -f compose.admin.yaml run backup_db
```
-The `docker-compose.yml` file might specify a `webapp` service.
+The `compose.yaml` file might specify a `webapp` service.
```yaml
services:
@@ -80,7 +95,7 @@ services:
volumes:
- "/data"
```
-If the `docker-compose.admin.yml` also specifies this same service, any matching fields override the previous file.
+If the `compose.admin.yaml` also specifies this same service, any matching fields override the previous file.
 New values add to the `webapp` service configuration.
```yaml
@@ -112,14 +127,72 @@ get the postgres image for the db service from anywhere by using the `-f` flag a
$ docker compose -f ~/sandbox/rails/compose.yaml pull db
```
-### Use `-p` to specify a project name
+#### Using an OCI published artifact
+You can use the `-f` flag with the `oci://` prefix to reference a Compose file that has been published to an OCI registry.
+This allows you to distribute and version your Compose configurations as OCI artifacts.
-Each configuration has a project name. If you supply a `-p` flag, you can specify a project name. If you don’t
-specify the flag, Compose uses the current directory name.
-Project name can also be set by `COMPOSE_PROJECT_NAME` environment variable.
+To use a Compose file from an OCI registry:
-Most compose subcommand can be ran without a compose file, just passing
-project name to retrieve the relevant resources.
+```console
+$ docker compose -f oci://registry.example.com/my-compose-project:latest up
+```
+
+You can also combine OCI artifacts with local files:
+
+```console
+$ docker compose -f oci://registry.example.com/my-compose-project:v1.0 -f compose.override.yaml up
+```
+
+The OCI artifact must contain a valid Compose file. You can publish Compose files to an OCI registry using the
+`docker compose publish` command.
+
+#### Using a git repository
+You can use the `-f` flag to reference a Compose file from a git repository. Compose supports various git URL formats:
+
+Using HTTPS:
+```console
+$ docker compose -f https://github.com/user/repo.git up
+```
+
+Using SSH:
+```console
+$ docker compose -f git@github.com:user/repo.git up
+```
+
+You can specify a specific branch, tag, or commit:
+```console
+$ docker compose -f https://github.com/user/repo.git@main up
+$ docker compose -f https://github.com/user/repo.git@v1.0.0 up
+$ docker compose -f https://github.com/user/repo.git@abc123 up
+```
+
+You can also specify a subdirectory within the repository:
+```console
+$ docker compose -f https://github.com/user/repo.git#main:path/to/compose.yaml up
+```
+
+When using git resources, Compose will clone the repository and use the specified Compose file. You can combine
+git resources with local files:
+
+```console
+$ docker compose -f https://github.com/user/repo.git -f compose.override.yaml up
+```
+
+### Use `-p` to specify a project name
+
+Each configuration has a project name. Compose sets the project name using
+the following mechanisms, in order of precedence:
+- The `-p` command line flag
+- The `COMPOSE_PROJECT_NAME` environment variable
+- The top level `name:` variable from the config file (or the last `name:`
+from a series of config files specified using `-f`)
+- The `basename` of the project directory containing the config file (or
+containing the first config file specified using `-f`)
+- The `basename` of the current directory if no config file is specified
+
+Project names must contain only lowercase letters, decimal digits, dashes,
+and underscores, and must begin with a lowercase letter or decimal digit. If
+the `basename` of the project directory or current directory violates this
+constraint, you must use one of the other mechanisms.
```console
$ docker compose -p my_project ps -a
@@ -134,21 +207,58 @@ demo_1 | 64 bytes from 127.0.0.1: seq=0 ttl=64 time=0.095 ms
### Use profiles to enable optional services
Use `--profile` to specify one or more active profiles
-Calling `docker compose --profile frontend up` will start the services with the profile `frontend` and services
+Calling `docker compose --profile frontend up` starts the services with the profile `frontend` and services
without any specified profiles.
-You can also enable multiple profiles, e.g. with `docker compose --profile frontend --profile debug up` the profiles `frontend` and `debug` will be enabled.
+You can also enable multiple profiles, e.g. with `docker compose --profile frontend --profile debug up` the profiles `frontend` and `debug` are enabled.
Profiles can also be set by `COMPOSE_PROFILES` environment variable.
+### Configuring parallelism
+
+Use `--parallel` to specify the maximum level of parallelism for concurrent engine calls.
+Calling `docker compose --parallel 1 pull` pulls the pullable images defined in the Compose file
+one at a time. This can also be used to control build concurrency.
+
+Parallelism can also be set by the `COMPOSE_PARALLEL_LIMIT` environment variable.
+
### Set up environment variables
You can set environment variables for various docker compose options, including the `-f`, `-p` and `--profiles` flags.
Setting the `COMPOSE_FILE` environment variable is equivalent to passing the `-f` flag,
-`COMPOSE_PROJECT_NAME` environment variable does the same for to the `-p` flag,
-and so does `COMPOSE_PROFILES` environment variable for to the `--profiles` flag.
+the `COMPOSE_PROJECT_NAME` environment variable does the same as the `-p` flag,
+`COMPOSE_PROFILES` is equivalent to the `--profile` flag,
+and `COMPOSE_PARALLEL_LIMIT` does the same as the `--parallel` flag.
-If flags are explicitly set on command line, associated environment variable is ignored
+If flags are explicitly set on the command line, the associated environment variable is ignored.
-Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` will stop docker compose from detecting orphaned
+Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` stops docker compose from detecting orphaned
containers for the project.
+
+Setting the `COMPOSE_MENU` environment variable to `false` disables the helper menu when running `docker compose up`
+in attached mode. Alternatively, you can also run `docker compose up --menu=false` to disable the helper menu.
+
+### Use Dry Run mode to test your command
+
+Use the `--dry-run` flag to test a command without changing your application stack state.
+Dry Run mode shows you all the steps Compose applies when executing a command, for example:
+```console
+$ docker compose --dry-run up --build -d
+[+] Pulling 1/1
+ ✔ DRY-RUN MODE - db Pulled 0.9s
+[+] Running 10/8
+ ✔ DRY-RUN MODE - build service backend 0.0s
+ ✔ DRY-RUN MODE - ==> ==> writing image dryRun-754a08ddf8bcb1cf22f310f09206dd783d42f7dd 0.0s
+ ✔ DRY-RUN MODE - ==> ==> naming to nginx-golang-mysql-backend 0.0s
+ ✔ DRY-RUN MODE - Network nginx-golang-mysql_default Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-db-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-backend-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-proxy-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-db-1 Healthy 0.5s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-backend-1 Started 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-proxy-1 Started
+```
+From the example above, you can see that the first step is to pull the image defined by the `db` service, then build the `backend` service.
+Next, the containers are created. The `db` service is started, and the `backend` and `proxy` services wait until the `db` service is healthy before starting.
+
+Dry Run mode works with almost all commands. You cannot use it with commands that don't change the state of a Compose stack, such as `ps`, `ls`, or `logs`.
diff --git a/docs/reference/compose_alpha.md b/docs/reference/compose_alpha.md
new file mode 100644
index 00000000000..34485d7deff
--- /dev/null
+++ b/docs/reference/compose_alpha.md
@@ -0,0 +1,22 @@
+# docker compose alpha
+
+
+Experimental commands
+
+### Subcommands
+
+| Name | Description |
+|:----------------------------------|:-----------------------------------------------------------------------------------------------------|
+| [`viz`](compose_alpha_viz.md) | EXPERIMENTAL - Generate a graphviz graph from your compose file |
+| [`watch`](compose_alpha_watch.md) | EXPERIMENTAL - Watch build context for service and rebuild/refresh containers when files are updated |
+
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-----|:--------|:--------------------------------|
+| `--dry-run` | | | Execute command in dry run mode |
+
+
+
+
diff --git a/docs/reference/compose_alpha_dry-run.md b/docs/reference/compose_alpha_dry-run.md
new file mode 100644
index 00000000000..7c68d94d66b
--- /dev/null
+++ b/docs/reference/compose_alpha_dry-run.md
@@ -0,0 +1,8 @@
+# docker compose alpha dry-run
+
+
+Dry run command allows you to test a command without applying changes
+
+
+
+
diff --git a/docs/reference/compose_alpha_generate.md b/docs/reference/compose_alpha_generate.md
new file mode 100644
index 00000000000..f4054627798
--- /dev/null
+++ b/docs/reference/compose_alpha_generate.md
@@ -0,0 +1,17 @@
+# docker compose alpha generate
+
+
+EXPERIMENTAL - Generate a Compose file from existing containers
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--format` | `string` | `yaml` | Format the output. Values: [yaml \| json] |
+| `--name` | `string` | | Project name to set in the Compose file |
+| `--project-dir` | `string` | | Directory to use for the project |
+
+
+
+
diff --git a/docs/reference/compose_alpha_publish.md b/docs/reference/compose_alpha_publish.md
new file mode 100644
index 00000000000..6e77d714532
--- /dev/null
+++ b/docs/reference/compose_alpha_publish.md
@@ -0,0 +1,18 @@
+# docker compose alpha publish
+
+
+Publish compose application
+
+### Options
+
+| Name | Type | Default | Description |
+|:--------------------------|:---------|:--------|:-------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--oci-version` | `string` | | OCI image/artifact specification version (automatically determined by default) |
+| `--resolve-image-digests` | `bool` | | Pin image tags to digests |
+| `--with-env` | `bool` | | Include environment variables in the published OCI artifact |
+| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts |
+
+
+
+
diff --git a/docs/reference/compose_alpha_scale.md b/docs/reference/compose_alpha_scale.md
new file mode 100644
index 00000000000..f783f3335c7
--- /dev/null
+++ b/docs/reference/compose_alpha_scale.md
@@ -0,0 +1,15 @@
+# docker compose alpha scale
+
+
+Scale services
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-----|:--------|:--------------------------------|
+| `--dry-run` | | | Execute command in dry run mode |
+| `--no-deps` | | | Don't start linked services |
+
+
+
+
diff --git a/docs/reference/compose_alpha_viz.md b/docs/reference/compose_alpha_viz.md
new file mode 100644
index 00000000000..1a05aaac14d
--- /dev/null
+++ b/docs/reference/compose_alpha_viz.md
@@ -0,0 +1,19 @@
+# docker compose alpha viz
+
+
+EXPERIMENTAL - Generate a graphviz graph from your compose file
+
+### Options
+
+| Name | Type | Default | Description |
+|:---------------------|:-------|:--------|:---------------------------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--image` | `bool` | | Include service's image name in output graph |
+| `--indentation-size` | `int` | `1` | Number of tabs or spaces to use for indentation |
+| `--networks` | `bool` | | Include service's attached networks in output graph |
+| `--ports` | `bool` | | Include service's exposed ports in output graph |
+| `--spaces` | `bool` | | If given, space character ' ' will be used to indent, otherwise tab character '\t' will be used |
+
+
+
+
diff --git a/docs/reference/compose_alpha_watch.md b/docs/reference/compose_alpha_watch.md
new file mode 100644
index 00000000000..aa8130e7a02
--- /dev/null
+++ b/docs/reference/compose_alpha_watch.md
@@ -0,0 +1,16 @@
+# docker compose alpha watch
+
+
+Watch build context for service and rebuild/refresh containers when files are updated
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-----|:--------|:----------------------------------------------|
+| `--dry-run` | | | Execute command in dry run mode |
+| `--no-up` | | | Do not build & start services before watching |
+| `--quiet` | | | Hide build output |
+
+
+
+
diff --git a/docs/reference/compose_attach.md b/docs/reference/compose_attach.md
new file mode 100644
index 00000000000..0b9ede1e01a
--- /dev/null
+++ b/docs/reference/compose_attach.md
@@ -0,0 +1,17 @@
+# docker compose attach
+
+
+Attach local standard input, output, and error streams to a service's running container
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:----------------------------------------------------------|
+| `--detach-keys` | `string` | | Override the key sequence for detaching from a container. |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--index` | `int` | `0` | index of the container if service has multiple replicas. |
+| `--no-stdin` | `bool` | | Do not attach STDIN |
+| `--sig-proxy` | `bool` | `true` | Proxy all received signals to the process |
+
+
+
diff --git a/docs/reference/compose_bridge.md b/docs/reference/compose_bridge.md
new file mode 100644
index 00000000000..78d3da4934c
--- /dev/null
+++ b/docs/reference/compose_bridge.md
@@ -0,0 +1,22 @@
+# docker compose bridge
+
+
+Convert compose files into another model
+
+### Subcommands
+
+| Name | Description |
+|:-------------------------------------------------------|:-----------------------------------------------------------------------------|
+| [`convert`](compose_bridge_convert.md) | Convert compose files to Kubernetes manifests, Helm charts, or another model |
+| [`transformations`](compose_bridge_transformations.md) | Manage transformation images |
+
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+
+
+
+
diff --git a/docs/reference/compose_bridge_convert.md b/docs/reference/compose_bridge_convert.md
new file mode 100644
index 00000000000..d4b91ba172d
--- /dev/null
+++ b/docs/reference/compose_bridge_convert.md
@@ -0,0 +1,17 @@
+# docker compose bridge convert
+
+
+Convert compose files to Kubernetes manifests, Helm charts, or another model
+
+### Options
+
+| Name | Type | Default | Description |
+|:-------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-o`, `--output` | `string` | `out` | The output directory for the Kubernetes resources |
+| `--templates` | `string` | | Directory containing transformation templates |
+| `-t`, `--transformation` | `stringArray` | | Transformation to apply to compose model (default: docker/compose-bridge-kubernetes) |
+
+
+
+
diff --git a/docs/reference/compose_bridge_transformations.md b/docs/reference/compose_bridge_transformations.md
new file mode 100644
index 00000000000..1e1c7be392b
--- /dev/null
+++ b/docs/reference/compose_bridge_transformations.md
@@ -0,0 +1,22 @@
+# docker compose bridge transformations
+
+
+Manage transformation images
+
+### Subcommands
+
+| Name | Description |
+|:-----------------------------------------------------|:-------------------------------|
+| [`create`](compose_bridge_transformations_create.md) | Create a new transformation |
+| [`list`](compose_bridge_transformations_list.md) | List available transformations |
+
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+
+
+
+
diff --git a/docs/reference/compose_bridge_transformations_create.md b/docs/reference/compose_bridge_transformations_create.md
new file mode 100644
index 00000000000..187e8d9eca3
--- /dev/null
+++ b/docs/reference/compose_bridge_transformations_create.md
@@ -0,0 +1,15 @@
+# docker compose bridge transformations create
+
+
+Create a new transformation
+
+### Options
+
+| Name | Type | Default | Description |
+|:---------------|:---------|:--------|:----------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-f`, `--from` | `string` | | Existing transformation to copy (default: docker/compose-bridge-kubernetes) |
+
+
+
+
diff --git a/docs/reference/compose_bridge_transformations_list.md b/docs/reference/compose_bridge_transformations_list.md
new file mode 100644
index 00000000000..ce0a5e6911a
--- /dev/null
+++ b/docs/reference/compose_bridge_transformations_list.md
@@ -0,0 +1,20 @@
+# docker compose bridge transformations list
+
+
+List available transformations
+
+### Aliases
+
+`docker compose bridge transformations list`, `docker compose bridge transformations ls`
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:-------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--format` | `string` | `table` | Format the output. Values: [table \| json] |
+| `-q`, `--quiet` | `bool` | | Only display transformer names |
+
+
+
+
diff --git a/docs/reference/compose_build.md b/docs/reference/compose_build.md
index 292df454d28..a715974dfa5 100644
--- a/docs/reference/compose_build.md
+++ b/docs/reference/compose_build.md
@@ -1,30 +1,46 @@
# docker compose build
-Build or rebuild services
+Services are built once and then tagged, by default as `project-service`.
+
+If the Compose file specifies an
+[image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name,
+the image is tagged with that name, substituting any variables beforehand. See
+[variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation).
+
+If you change a service's `Dockerfile` or the contents of its build directory,
+run `docker compose build` to rebuild it.
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--build-arg` | `stringArray` | | Set build-time variables for services. |
-| `--no-cache` | | | Do not use cache when building the image |
-| `--progress` | `string` | `auto` | Set type of progress output (auto, tty, plain, quiet) |
-| `--pull` | | | Always attempt to pull a newer version of the image. |
-| `-q`, `--quiet` | | | Don't print anything to STDOUT |
-| `--ssh` | `string` | | Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent) |
+| Name | Type | Default | Description |
+|:----------------------|:--------------|:--------|:------------------------------------------------------------------------------------------------------------|
+| `--build-arg` | `stringArray` | | Set build-time variables for services |
+| `--builder` | `string` | | Set builder to use |
+| `--check` | `bool` | | Check build configuration |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-m`, `--memory` | `bytes` | `0` | Set memory limit for the build container. Not supported by BuildKit. |
+| `--no-cache` | `bool` | | Do not use cache when building the image |
+| `--print` | `bool` | | Print equivalent bake file |
+| `--provenance` | `string` | | Add a provenance attestation |
+| `--pull` | `bool` | | Always attempt to pull a newer version of the image |
+| `--push` | `bool` | | Push service images |
+| `-q`, `--quiet` | `bool` | | Suppress the build output |
+| `--sbom` | `string` | | Add a SBOM attestation |
+| `--ssh` | `string` | | Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent) |
+| `--with-dependencies` | `bool` | | Also build dependencies (transitively) |
## Description
-Services are built once and then tagged, by default as `project_service`.
+Services are built once and then tagged, by default as `project-service`.
If the Compose file specifies an
-[image](https://github.com/compose-spec/compose-spec/blob/master/spec.md#image) name,
+[image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name,
the image is tagged with that name, substituting any variables beforehand. See
-[variable interpolation](https://github.com/compose-spec/compose-spec/blob/master/spec.md#interpolation).
+[variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation).
If you change a service's `Dockerfile` or the contents of its build directory,
run `docker compose build` to rebuild it.
diff --git a/docs/reference/compose_commit.md b/docs/reference/compose_commit.md
new file mode 100644
index 00000000000..1aad40931f9
--- /dev/null
+++ b/docs/reference/compose_commit.md
@@ -0,0 +1,19 @@
+# docker compose commit
+
+
+Create a new image from a service container's changes
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------------|:---------|:--------|:-----------------------------------------------------------|
+| `-a`, `--author` | `string` | | Author (e.g., "John Hannibal Smith ") |
+| `-c`, `--change` | `list` | | Apply Dockerfile instruction to the created image |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--index` | `int` | `0` | index of the container if service has multiple replicas. |
+| `-m`, `--message` | `string` | | Commit message |
+| `-p`, `--pause` | `bool` | `true` | Pause container during commit |
+
+
+
+
diff --git a/docs/reference/compose_config.md b/docs/reference/compose_config.md
new file mode 100644
index 00000000000..e2e773feae5
--- /dev/null
+++ b/docs/reference/compose_config.md
@@ -0,0 +1,40 @@
+# docker compose config
+
+
+`docker compose config` renders the actual data model to be applied on the Docker Engine.
+It merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into
+the canonical format.
+
+### Options
+
+| Name | Type | Default | Description |
+|:--------------------------|:---------|:--------|:----------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--environment` | `bool` | | Print environment used for interpolation. |
+| `--format` | `string` | | Format the output. Values: [yaml \| json] |
+| `--hash` | `string` | | Print the service config hash, one per line. |
+| `--images` | `bool` | | Print the image names, one per line. |
+| `--lock-image-digests` | `bool` | | Produces an override file with image digests |
+| `--models` | `bool` | | Print the model names, one per line. |
+| `--networks` | `bool` | | Print the network names, one per line. |
+| `--no-consistency` | `bool` | | Don't check model consistency - warning: may produce invalid Compose output |
+| `--no-env-resolution` | `bool` | | Don't resolve service env files |
+| `--no-interpolate` | `bool` | | Don't interpolate environment variables |
+| `--no-normalize` | `bool` | | Don't normalize compose model |
+| `--no-path-resolution` | `bool` | | Don't resolve file paths |
+| `-o`, `--output` | `string` | | Save to file (default to stdout) |
+| `--profiles` | `bool` | | Print the profile names, one per line. |
+| `-q`, `--quiet` | `bool` | | Only validate the configuration, don't print anything |
+| `--resolve-image-digests` | `bool` | | Pin image tags to digests |
+| `--services` | `bool` | | Print the service names, one per line. |
+| `--variables` | `bool` | | Print model variables and default values. |
+| `--volumes` | `bool` | | Print the volume names, one per line. |
+
+
+
+
+## Description
+
+`docker compose config` renders the actual data model to be applied on the Docker Engine.
+It merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into
+the canonical format.
diff --git a/docs/reference/compose_convert.md b/docs/reference/compose_convert.md
deleted file mode 100644
index 91ff89ea9ff..00000000000
--- a/docs/reference/compose_convert.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# docker compose convert
-
-
-Converts the compose file to platform's canonical format
-
-### Aliases
-
-`convert`, `config`
-
-### Options
-
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--format` | `string` | `yaml` | Format the output. Values: [yaml \| json] |
-| `--hash` | `string` | | Print the service config hash, one per line. |
-| `--images` | | | Print the image names, one per line. |
-| `--no-interpolate` | | | Don't interpolate environment variables. |
-| `--no-normalize` | | | Don't normalize compose model. |
-| `-o`, `--output` | `string` | | Save to file (default to stdout) |
-| `--profiles` | | | Print the profile names, one per line. |
-| `-q`, `--quiet` | | | Only validate the configuration, don't print anything. |
-| `--resolve-image-digests` | | | Pin image tags to digests. |
-| `--services` | | | Print the service names, one per line. |
-| `--volumes` | | | Print the volume names, one per line. |
-
-
-
-
-## Description
-
-`docker compose convert` render the actual data model to be applied on target platform. When used with Docker engine,
-it merges the Compose files set by `-f` flags, resolves variables in Compose file, and expands short-notation into
-fully defined Compose model.
-
-To allow smooth migration from docker-compose, this subcommand declares alias `docker compose config`
diff --git a/docs/reference/compose_cp.md b/docs/reference/compose_cp.md
index 2d97b9f2f4a..0886bbd9f94 100644
--- a/docs/reference/compose_cp.md
+++ b/docs/reference/compose_cp.md
@@ -5,12 +5,13 @@ Copy files/folders between a service container and the local filesystem
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--all` | | | Copy to all the containers of the service. |
-| `-a`, `--archive` | | | Archive mode (copy all uid/gid information) |
-| `-L`, `--follow-link` | | | Always follow symbol link in SRC_PATH |
-| `--index` | `int` | `1` | Index of the container if there are multiple instances of a service [default: 1]. |
+| Name | Type | Default | Description |
+|:----------------------|:-------|:--------|:--------------------------------------------------------|
+| `--all` | `bool` | | Include containers created by the run command |
+| `-a`, `--archive` | `bool` | | Archive mode (copy all uid/gid information) |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-L`, `--follow-link` | `bool` | | Always follow symbol link in SRC_PATH |
+| `--index` | `int` | `0` | Index of the container if service has multiple replicas |
diff --git a/docs/reference/compose_create.md b/docs/reference/compose_create.md
index 00123ba788b..4b0b876da91 100644
--- a/docs/reference/compose_create.md
+++ b/docs/reference/compose_create.md
@@ -1,16 +1,22 @@
# docker compose create
-Creates containers for a service.
+Creates containers for a service
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--build` | | | Build images before starting containers. |
-| `--force-recreate` | | | Recreate containers even if their configuration and image haven't changed. |
-| `--no-build` | | | Don't build an image, even if it's missing. |
-| `--no-recreate` | | | If containers already exist, don't recreate them. Incompatible with --force-recreate. |
+| Name | Type | Default | Description |
+|:-------------------|:--------------|:---------|:----------------------------------------------------------------------------------------------|
+| `--build` | `bool` | | Build images before starting containers |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--force-recreate` | `bool` | | Recreate containers even if their configuration and image haven't changed |
+| `--no-build` | `bool` | | Don't build an image, even if it's policy |
+| `--no-recreate` | `bool` | | If containers already exist, don't recreate them. Incompatible with --force-recreate. |
+| `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never"\|"build") |
+| `--quiet-pull` | `bool` | | Pull without printing progress information |
+| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
+| `--scale` | `stringArray` | | Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. |
+| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively |
diff --git a/docs/reference/compose_down.md b/docs/reference/compose_down.md
index 8864aa6e828..2ac0bf2da42 100644
--- a/docs/reference/compose_down.md
+++ b/docs/reference/compose_down.md
@@ -1,16 +1,29 @@
# docker compose down
-Stop and remove containers, networks
+Stops containers and removes containers, networks, volumes, and images created by `up`.
+
+By default, the only things removed are:
+
+- Containers for services defined in the Compose file.
+- Networks defined in the networks section of the Compose file.
+- The default network, if one is used.
+
+Networks and volumes defined as external are never removed.
+
+Anonymous volumes are not removed by default. However, as they don’t have a stable name, they are not automatically
+mounted by a subsequent `up`. For data that needs to persist between updates, use explicit paths as bind mounts or
+named volumes.
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--remove-orphans` | | | Remove containers for services not defined in the Compose file. |
-| `--rmi` | `string` | | Remove images used by services. "local" remove only images that don't have a custom tag ("local"\|"all") |
-| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
-| `-v`, `--volumes` | | | Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers. |
+| Name | Type | Default | Description |
+|:-------------------|:---------|:--------|:------------------------------------------------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
+| `--rmi` | `string` | | Remove images used by services. "local" remove only images that don't have a custom tag ("local"\|"all") |
+| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
+| `-v`, `--volumes` | `bool` | | Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers |
@@ -21,12 +34,12 @@ Stops containers and removes containers, networks, volumes, and images created b
By default, the only things removed are:
-- Containers for services defined in the Compose file
-- Networks defined in the networks section of the Compose file
-- The default network, if one is used
+- Containers for services defined in the Compose file.
+- Networks defined in the networks section of the Compose file.
+- The default network, if one is used.
Networks and volumes defined as external are never removed.
-Anonymous volumes are not removed by default. However, as they don’t have a stable name, they will not be automatically
+Anonymous volumes are not removed by default. However, as they don’t have a stable name, they are not automatically
mounted by a subsequent `up`. For data that needs to persist between updates, use explicit paths as bind mounts or
named volumes.
diff --git a/docs/reference/compose_events.md b/docs/reference/compose_events.md
index 496d204e3b8..066b5cf3831 100644
--- a/docs/reference/compose_events.md
+++ b/docs/reference/compose_events.md
@@ -1,13 +1,34 @@
# docker compose events
-Receive real time events from containers.
+Stream container events for every container in the project.
+
+With the `--json` flag, a json object is printed one per line with the format:
+
+```json
+{
+ "time": "2015-11-20T18:01:03.615550",
+ "type": "container",
+ "action": "create",
+ "id": "213cf7...5fc39a",
+ "service": "web",
+ "attributes": {
+ "name": "application_web_1",
+ "image": "alpine:edge"
+ }
+}
+```
+
+The events that can be received using this can be seen [here](/reference/cli/docker/system/events/#object-types).
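+
+As an illustration (outside of Compose itself), a small Go program could consume this stream one JSON object per
+line, for example when piped from `docker compose events --json`; the field names below follow the example above:
+
+```go
+// Reads JSON event lines from stdin, e.g.:
+//   docker compose events --json | go run .
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+type event struct {
+	Time       string            `json:"time"`
+	Type       string            `json:"type"`
+	Action     string            `json:"action"`
+	ID         string            `json:"id"`
+	Service    string            `json:"service"`
+	Attributes map[string]string `json:"attributes"`
+}
+
+func main() {
+	scanner := bufio.NewScanner(os.Stdin)
+	for scanner.Scan() {
+		var ev event
+		if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil {
+			fmt.Fprintln(os.Stderr, "skipping malformed line:", err)
+			continue
+		}
+		fmt.Printf("%s %s %s (%s)\n", ev.Time, ev.Service, ev.Action, ev.Attributes["image"])
+	}
+}
+```
+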
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--json` | | | Output events as a stream of json objects |
+| Name | Type | Default | Description |
+|:------------|:---------|:--------|:------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--json` | `bool` | | Output events as a stream of json objects |
+| `--since` | `string` | | Show all events created since timestamp |
+| `--until` | `string` | | Stream events until this timestamp |
@@ -32,4 +53,4 @@ With the `--json` flag, a json object is printed one per line with the format:
}
```
-The events that can be received using this can be seen [here](https://docs.docker.com/engine/reference/commandline/events/#object-types).
+The events that can be received using this can be seen [here](https://docs.docker.com/reference/cli/docker/system/events/#object-types).
diff --git a/docs/reference/compose_exec.md b/docs/reference/compose_exec.md
index 9dd818b9fb3..312219e7316 100644
--- a/docs/reference/compose_exec.md
+++ b/docs/reference/compose_exec.md
@@ -1,19 +1,29 @@
# docker compose exec
-Execute a command in a running container.
+This is the equivalent of `docker exec` targeting a Compose service.
+
+With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so
+you can use a command such as `docker compose exec web sh` to get an interactive prompt.
+
+By default, Compose enters the container in interactive mode and allocates a TTY, while the equivalent `docker exec`
+command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports those two
+flags to offer a smooth migration between commands, even though they are no-ops by default. Still, `--interactive`
+can be used to force-disable interactive mode (`--interactive=false`), typically when the `docker compose exec`
+command is used inside a script.
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-d`, `--detach` | | | Detached mode: Run command in the background. |
-| `-e`, `--env` | `stringArray` | | Set environment variables |
-| `--index` | `int` | `1` | index of the container if there are multiple instances of a service [default: 1]. |
-| `-T`, `--no-TTY` | | | Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY. |
-| `--privileged` | | | Give extended privileges to the process. |
-| `-u`, `--user` | `string` | | Run the command as this user. |
-| `-w`, `--workdir` | `string` | | Path to workdir directory for this command. |
+| Name | Type | Default | Description |
+|:------------------|:--------------|:--------|:---------------------------------------------------------------------------------|
+| `-d`, `--detach` | `bool` | | Detached mode: Run command in the background |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-e`, `--env` | `stringArray` | | Set environment variables |
+| `--index` | `int` | `0` | Index of the container if service has multiple replicas |
+| `-T`, `--no-tty` | `bool` | `true` | Disable pseudo-TTY allocation. By default 'docker compose exec' allocates a TTY. |
+| `--privileged` | `bool` | | Give extended privileges to the process |
+| `-u`, `--user` | `string` | | Run the command as this user |
+| `-w`, `--workdir` | `string` | | Path to workdir directory for this command |
@@ -22,5 +32,11 @@ Execute a command in a running container.
This is the equivalent of `docker exec` targeting a Compose service.
-With this subcommand you can run arbitrary commands in your services. Commands are by default allocating a TTY, so
+With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so
you can use a command such as `docker compose exec web sh` to get an interactive prompt.
+
+By default, Compose enters the container in interactive mode and allocates a TTY, while the equivalent `docker exec`
+command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports those two
+flags to offer a smooth migration between commands, even though they are no-ops by default. Still, `--interactive`
+can be used to force-disable interactive mode (`--interactive=false`), typically when the `docker compose exec`
+command is used inside a script.
\ No newline at end of file
diff --git a/docs/reference/compose_export.md b/docs/reference/compose_export.md
new file mode 100644
index 00000000000..942ea6a347f
--- /dev/null
+++ b/docs/reference/compose_export.md
@@ -0,0 +1,16 @@
+# docker compose export
+
+
+Export a service container's filesystem as a tar archive
+
+### Options
+
+| Name | Type | Default | Description |
+|:-----------------|:---------|:--------|:---------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--index` | `int` | `0` | index of the container if service has multiple replicas. |
+| `-o`, `--output` | `string` | | Write to a file, instead of STDOUT |
+
+
+
+
diff --git a/docs/reference/compose_images.md b/docs/reference/compose_images.md
index cfb0ad2cd4d..1e4e0259b1d 100644
--- a/docs/reference/compose_images.md
+++ b/docs/reference/compose_images.md
@@ -5,9 +5,11 @@ List images used by the created containers
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-q`, `--quiet` | | | Only display IDs |
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:-------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--format` | `string` | `table` | Format the output. Values: [table \| json] |
+| `-q`, `--quiet` | `bool` | | Only display IDs |
diff --git a/docs/reference/compose_kill.md b/docs/reference/compose_kill.md
index d0512faaec1..0b6c1d05f01 100644
--- a/docs/reference/compose_kill.md
+++ b/docs/reference/compose_kill.md
@@ -1,13 +1,19 @@
# docker compose kill
-Force stop service containers.
+Forces running containers to stop by sending a `SIGKILL` signal. Optionally the signal can be passed, for example:
+
+```console
+$ docker compose kill -s SIGINT
+```
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-s`, `--signal` | `string` | `SIGKILL` | SIGNAL to send to the container. |
+| Name | Type | Default | Description |
+|:-------------------|:---------|:----------|:---------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
+| `-s`, `--signal` | `string` | `SIGKILL` | SIGNAL to send to the container |
@@ -17,5 +23,5 @@ Force stop service containers.
Forces running containers to stop by sending a `SIGKILL` signal. Optionally the signal can be passed, for example:
```console
-$ docker-compose kill -s SIGINT
+$ docker compose kill -s SIGINT
```
diff --git a/docs/reference/compose_logs.md b/docs/reference/compose_logs.md
index 4f9690a7e2d..4c8ba7e3486 100644
--- a/docs/reference/compose_logs.md
+++ b/docs/reference/compose_logs.md
@@ -1,23 +1,25 @@
# docker compose logs
-View output from containers
+Displays log output from services
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-f`, `--follow` | | | Follow log output. |
-| `--no-color` | | | Produce monochrome output. |
-| `--no-log-prefix` | | | Don't print prefix in logs. |
-| `--since` | `string` | | Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes) |
-| `--tail` | `string` | `all` | Number of lines to show from the end of the logs for each container. |
-| `-t`, `--timestamps` | | | Show timestamps. |
-| `--until` | `string` | | Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes) |
+| Name | Type | Default | Description |
+|:---------------------|:---------|:--------|:-----------------------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-f`, `--follow` | `bool` | | Follow log output |
+| `--index` | `int` | `0` | index of the container if service has multiple replicas |
+| `--no-color` | `bool` | | Produce monochrome output |
+| `--no-log-prefix` | `bool` | | Don't print prefix in logs |
+| `--since` | `string` | | Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes) |
+| `-n`, `--tail` | `string` | `all` | Number of lines to show from the end of the logs for each container |
+| `-t`, `--timestamps` | `bool` | | Show timestamps |
+| `--until` | `string` | | Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes) |
## Description
-Displays log output from services.
\ No newline at end of file
+Displays log output from services
diff --git a/docs/reference/compose_ls.md b/docs/reference/compose_ls.md
index b172c7f3a10..7719d208609 100644
--- a/docs/reference/compose_ls.md
+++ b/docs/reference/compose_ls.md
@@ -1,20 +1,21 @@
# docker compose ls
-List running compose projects
+Lists running Compose projects
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-a`, `--all` | | | Show all stopped Compose projects |
-| `--filter` | `filter` | | Filter output based on conditions provided. |
-| `--format` | `string` | `pretty` | Format the output. Values: [pretty \| json]. |
-| `-q`, `--quiet` | | | Only display IDs. |
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:-------------------------------------------|
+| `-a`, `--all` | `bool` | | Show all stopped Compose projects |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--filter` | `filter` | | Filter output based on conditions provided |
+| `--format` | `string` | `table` | Format the output. Values: [table \| json] |
+| `-q`, `--quiet` | `bool` | | Only display project names |
## Description
-List Compose projects running on platform.
\ No newline at end of file
+Lists running Compose projects
diff --git a/docs/reference/compose_pause.md b/docs/reference/compose_pause.md
index dd34191bdc0..4a0d5bdcc03 100644
--- a/docs/reference/compose_pause.md
+++ b/docs/reference/compose_pause.md
@@ -1,7 +1,13 @@
# docker compose pause
-Pause services
+Pauses running containers of a service. They can be unpaused with `docker compose unpause`.
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
diff --git a/docs/reference/compose_port.md b/docs/reference/compose_port.md
index 8ab893f0d5f..bbbfbf15616 100644
--- a/docs/reference/compose_port.md
+++ b/docs/reference/compose_port.md
@@ -1,18 +1,19 @@
# docker compose port
-Print the public port for a port binding.
+Prints the public port for a port binding
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--index` | `int` | `1` | index of the container if service has multiple replicas |
-| `--protocol` | `string` | `tcp` | tcp or udp |
+| Name | Type | Default | Description |
+|:-------------|:---------|:--------|:--------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--index` | `int` | `0` | Index of the container if service has multiple replicas |
+| `--protocol` | `string` | `tcp` | tcp or udp |
## Description
-Prints the public port for a port binding.
\ No newline at end of file
+Prints the public port for a port binding
diff --git a/docs/reference/compose_ps.md b/docs/reference/compose_ps.md
index edac9310639..3572c530556 100644
--- a/docs/reference/compose_ps.md
+++ b/docs/reference/compose_ps.md
@@ -1,18 +1,36 @@
# docker compose ps
-List containers
+Lists containers for a Compose project, with current status and exposed ports.
+
+```console
+$ docker compose ps
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+```
+
+By default, only running containers are shown. The `--all` flag can be used to include stopped containers.
+
+```console
+$ docker compose ps --all
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+example-bar-1 alpine "/entrypoint.…" bar 4 seconds ago exited (0)
+```
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-a`, `--all` | | | Show all stopped containers (including those created by the run command) |
-| [`--filter`](#filter) | `string` | | Filter services by a property (supported filters: status). |
-| [`--format`](#format) | `string` | `pretty` | Format the output. Values: [pretty \| json] |
-| `-q`, `--quiet` | | | Only display IDs |
-| `--services` | | | Display services |
-| [`--status`](#status) | `stringArray` | | Filter services by status. Values: [paused \| restarting \| removing \| running \| dead \| created \| exited] |
+| Name | Type | Default | Description |
+|:----------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-a`, `--all` | `bool` | | Show all stopped containers (including those created by the run command) |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| [`--filter`](#filter) | `string` | | Filter services by a property (supported filters: status) |
+| [`--format`](#format) | `string` | `table` | Format output using a custom template:<br>'table': Print output in table format with column headers (default)<br>'table TEMPLATE': Print output in table format using the given Go template<br>'json': Print in JSON format<br>'TEMPLATE': Print output using the given Go template.<br>Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates |
+| `--no-trunc` | `bool` | | Don't truncate output |
+| `--orphans` | `bool` | `true` | Include orphaned services (not declared by project) |
+| `-q`, `--quiet` | `bool` | | Only display IDs |
+| `--services` | `bool` | | Display services |
+| [`--status`](#status) | `stringArray` | | Filter services by status. Values: [paused \| restarting \| removing \| running \| dead \| created \| exited] |
@@ -20,13 +38,20 @@ List containers
## Description
Lists containers for a Compose project, with current status and exposed ports.
-By default, both running and stopped containers are shown:
```console
$ docker compose ps
-NAME COMMAND SERVICE STATUS PORTS
-example-bar-1 "/docker-entrypoint.…" bar exited (0)
-example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+```
+
+By default, only running containers are shown. The `--all` flag can be used to include stopped containers.
+
+```console
+$ docker compose ps --all
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+example-bar-1 alpine "/entrypoint.…" bar 4 seconds ago exited (0)
```
## Examples
@@ -35,7 +60,7 @@ example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->8
By default, the `docker compose ps` command uses a table ("pretty") format to
show the containers. The `--format` flag allows you to specify alternative
-presentations for the output. Currently supported options are `pretty` (default),
+presentations for the output. Currently, supported options are `pretty` (default),
and `json`, which outputs information about the containers as a JSON array:
```console
@@ -44,7 +69,7 @@ $ docker compose ps --format json
```
The JSON output allows you to use the information in other tools for further
-processing, for example, using the [`jq` utility](https://stedolan.github.io/jq/){:target="_blank" rel="noopener" class="_"}
+processing, for example, using the [`jq` utility](https://stedolan.github.io/jq/)
to pretty-print the JSON:
```console
@@ -85,33 +110,29 @@ $ docker compose ps --format json | jq .
### Filter containers by status (--status)
Use the `--status` flag to filter the list of containers by status. For example,
-to show only containers that are running, or only containers that have exited:
+to show only containers that are running or only containers that have exited:
```console
$ docker compose ps --status=running
-NAME COMMAND SERVICE STATUS PORTS
-example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
$ docker compose ps --status=exited
-NAME COMMAND SERVICE STATUS PORTS
-example-bar-1 "/docker-entrypoint.…" bar exited (0)
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-bar-1 alpine "/entrypoint.…" bar 4 seconds ago exited (0)
```
### Filter containers by status (--filter)
-The [`--status` flag](#status) is a convenience shorthand for the `--filter status=`
+The [`--status` flag](#status) is a convenient shorthand for the `--filter status=`
flag. The example below is the equivalent to the example from the previous section,
this time using the `--filter` flag:
```console
$ docker compose ps --filter status=running
-NAME COMMAND SERVICE STATUS PORTS
-example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
-
-$ docker compose ps --filter status=running
-NAME COMMAND SERVICE STATUS PORTS
-example-bar-1 "/docker-entrypoint.…" bar exited (0)
+NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
```
The `docker compose ps` command currently only supports the `--filter status=`
-option, but additional filter options may be added in future.
+option, but additional filter options may be added in the future.
diff --git a/docs/reference/compose_publish.md b/docs/reference/compose_publish.md
new file mode 100644
index 00000000000..9a82fc260a7
--- /dev/null
+++ b/docs/reference/compose_publish.md
@@ -0,0 +1,19 @@
+# docker compose publish
+
+
+Publish compose application
+
+### Options
+
+| Name | Type | Default | Description |
+|:--------------------------|:---------|:--------|:-------------------------------------------------------------------------------|
+| `--app` | `bool` | | Published compose application (includes referenced images) |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--oci-version` | `string` | | OCI image/artifact specification version (automatically determined by default) |
+| `--resolve-image-digests` | `bool` | | Pin image tags to digests |
+| `--with-env` | `bool` | | Include environment variables in the published OCI artifact |
+| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts |
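+
+As a usage sketch (the registry and tag below are placeholders), the command takes a repository reference for the published artifact:
+
+```console
+$ docker compose publish registry.example.com/my-compose-project:latest
+```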
+
+
+
+
diff --git a/docs/reference/compose_pull.md b/docs/reference/compose_pull.md
index c081c228498..6a47f9d509f 100644
--- a/docs/reference/compose_pull.md
+++ b/docs/reference/compose_pull.md
@@ -1,28 +1,30 @@
# docker compose pull
-Pull service images
+Pulls an image associated with a service defined in a `compose.yaml` file, but does not start containers based on those images
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--ignore-pull-failures` | | | Pull what it can and ignores images with pull failures |
-| `--include-deps` | | | Also pull services declared as dependencies |
-| `-q`, `--quiet` | | | Pull without printing progress information |
+| Name | Type | Default | Description |
+|:-------------------------|:---------|:--------|:-------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--ignore-buildable` | `bool` | | Ignore images that can be built |
+| `--ignore-pull-failures` | `bool` | | Pull what it can and ignores images with pull failures |
+| `--include-deps` | `bool` | | Also pull services declared as dependencies |
+| `--policy` | `string` | | Apply pull policy ("missing"\|"always") |
+| `-q`, `--quiet` | `bool` | | Pull without printing progress information |
## Description
-Pulls an image associated with a service defined in a `compose.yaml` file, but does not start containers based on
-those images.
+Pulls an image associated with a service defined in a `compose.yaml` file, but does not start containers based on those images
## Examples
-suppose you have this `compose.yaml`:
+Consider the following `compose.yaml`:
```yaml
services:
@@ -61,4 +63,6 @@ $ docker compose pull db
⠹ f63c47038e66 Waiting 9.3s
⠹ 77a0c198cde5 Waiting 9.3s
⠹ c8752d5b785c Waiting 9.3s
-``̀`
+```
+
+`docker compose pull` tries to pull images for services with a `build` section. If the pull fails, it lets you know that this service's image must be built. You can skip this by setting the `--ignore-buildable` flag.
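+
+For illustration, a minimal sketch of skipping buildable services during a pull:
+
+```console
+$ docker compose pull --ignore-buildable
+```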
diff --git a/docs/reference/compose_push.md b/docs/reference/compose_push.md
index a1569cc808a..0efc48c46d3 100644
--- a/docs/reference/compose_push.md
+++ b/docs/reference/compose_push.md
@@ -1,13 +1,33 @@
# docker compose push
-Push service images
+Pushes images for services to their respective registry/repository.
+
+The following assumptions are made:
+- You are pushing an image you have built locally
+- You have access to the build key
+
+Examples
+
+```yaml
+services:
+ service1:
+ build: .
+ image: localhost:5000/yourimage ## goes to local registry
+
+ service2:
+ build: .
+ image: your-dockerid/yourimage ## goes to your repository on Docker Hub
+```
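+
+As a usage sketch for the services above, you can push every image at once or name a single service:
+
+```console
+$ docker compose push
+$ docker compose push service2
+```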
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--ignore-push-failures` | | | Push what it can and ignores images with push failures |
+| Name | Type | Default | Description |
+|:-------------------------|:-------|:--------|:-------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--ignore-push-failures` | `bool` | | Push what it can and ignores images with push failures |
+| `--include-deps` | `bool` | | Also push images of services declared as dependencies |
+| `-q`, `--quiet` | `bool` | | Push without printing progress information |
diff --git a/docs/reference/compose_restart.md b/docs/reference/compose_restart.md
index 12326662b62..e57f346a81a 100644
--- a/docs/reference/compose_restart.md
+++ b/docs/reference/compose_restart.md
@@ -1,26 +1,37 @@
# docker compose restart
-Restart containers
+Restarts all stopped and running services, or the specified services only.
+
+If you make changes to your `compose.yml` configuration, these changes are not reflected
+after running this command. For example, changes to environment variables (which are added
+after a container is built, but before the container's command is executed) are not updated
+after restarting.
+
+If you are looking to configure a service's restart policy, refer to
+[restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart)
+or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy).
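+
+A minimal usage sketch (`web` is an assumed service name):
+
+```console
+$ docker compose restart
+$ docker compose restart web
+```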
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
+| Name | Type | Default | Description |
+|:------------------|:-------|:--------|:--------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--no-deps` | `bool` | | Don't restart dependent services |
+| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
## Description
-Restarts all stopped and running services.
+Restarts all stopped and running services, or the specified services only.
If you make changes to your `compose.yml` configuration, these changes are not reflected
after running this command. For example, changes to environment variables (which are added
after a container is built, but before the container's command is executed) are not updated
after restarting.
-If you are looking to configure a service's restart policy, please refer to
-[restart](https://github.com/compose-spec/compose-spec/blob/master/spec.md#restart)
-or [restart_policy](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#restart_policy).
+If you are looking to configure a service's restart policy, refer to
+[restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart)
+or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy).
diff --git a/docs/reference/compose_rm.md b/docs/reference/compose_rm.md
index 017c7f12294..5e84930bac3 100644
--- a/docs/reference/compose_rm.md
+++ b/docs/reference/compose_rm.md
@@ -1,20 +1,30 @@
# docker compose rm
-Removes stopped service containers
+Removes stopped service containers.
+
+By default, anonymous volumes attached to containers are not removed. You can override this with `-v`. To list all
+volumes, use `docker volume ls`.
-By default, anonymous volumes attached to containers will not be removed. You
-can override this with -v. To list all volumes, use "docker volume ls".
+Any data which is not in a volume is lost.
+
+Running the command with no options also removes one-off containers created by `docker compose run`:
-Any data which is not in a volume will be lost.
+```console
+$ docker compose rm
+Going to remove djangoquickstart_web_run_1
+Are you sure? [yN] y
+Removing djangoquickstart_web_run_1 ... done
+```
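+
+As a sketch, the flags listed below can be combined to stop containers first and also remove their anonymous volumes:
+
+```console
+$ docker compose rm --stop --volumes
+```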
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-f`, `--force` | | | Don't ask to confirm removal |
-| `-s`, `--stop` | | | Stop the containers, if required, before removing |
-| `-v`, `--volumes` | | | Remove any anonymous volumes attached to containers |
+| Name | Type | Default | Description |
+|:------------------|:-------|:--------|:----------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-f`, `--force` | `bool` | | Don't ask to confirm removal |
+| `-s`, `--stop` | `bool` | | Stop the containers, if required, before removing |
+| `-v`, `--volumes` | `bool` | | Remove any anonymous volumes attached to containers |
diff --git a/docs/reference/compose_run.md b/docs/reference/compose_run.md
index 3d616963739..25b28d1ded8 100644
--- a/docs/reference/compose_run.md
+++ b/docs/reference/compose_run.md
@@ -1,28 +1,89 @@
# docker compose run
-Run a one-off command on a service.
+Runs a one-time command against a service.
+
+The following command starts the `web` service and runs `bash` as its command:
+
+```console
+$ docker compose run web bash
+```
+
+Commands you use with run start in new containers with configuration defined by that of the service,
+including volumes, links, and other details. However, there are two important differences:
+
+First, the command passed by `run` overrides the command defined in the service configuration. For example, if the
+`web` service configuration is started with `bash`, then `docker compose run web python app.py` overrides it with
+`python app.py`.
+
+The second difference is that the `docker compose run` command does not create any of the ports specified in the
+service configuration. This prevents port collisions with already-open ports. If you do want the service’s ports
+to be created and mapped to the host, specify the `--service-ports` flag:
+
+```console
+$ docker compose run --service-ports web python manage.py shell
+```
+
+Alternatively, manual port mapping can be specified with the `--publish` or `-p` options, just as when using `docker run`:
+
+```console
+$ docker compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
+```
+
+If you start a service configured with links, the run command first checks to see if the linked service is running
+and starts the service if it is stopped. Once all the linked services are running, the run executes the command you
+passed it. For example, you could run:
+
+```console
+$ docker compose run db psql -h db -U docker
+```
+
+This opens an interactive PostgreSQL shell for the linked `db` container.
+
+If you do not want the run command to start linked containers, use the `--no-deps` flag:
+
+```console
+$ docker compose run --no-deps web python manage.py shell
+```
+
+If you want to remove the container after running while overriding the container’s restart policy, use the `--rm` flag:
+
+```console
+$ docker compose run --rm web python manage.py db upgrade
+```
+
+This runs a database upgrade script, and removes the container when finished running, even if a restart policy is
+specified in the service configuration.
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-d`, `--detach` | | | Run container in background and print container ID |
-| `--entrypoint` | `string` | | Override the entrypoint of the image |
-| `-e`, `--env` | `stringArray` | | Set environment variables |
-| `-i`, `--interactive` | | | Keep STDIN open even if not attached. |
-| `-l`, `--label` | `stringArray` | | Add or override a label |
-| `--name` | `string` | | Assign a name to the container |
-| `-T`, `--no-TTY` | | | Disable pseudo-TTY allocation (default: auto-detected). |
-| `--no-deps` | | | Don't start linked services. |
-| `-p`, `--publish` | `stringArray` | | Publish a container's port(s) to the host. |
-| `--quiet-pull` | | | Pull without printing progress information. |
-| `--rm` | | | Automatically remove the container when it exits |
-| `--service-ports` | | | Run command with the service's ports enabled and mapped to the host. |
-| `--use-aliases` | | | Use the service's network useAliases in the network(s) the container connects to. |
-| `-u`, `--user` | `string` | | Run as specified username or uid |
-| `-v`, `--volume` | `stringArray` | | Bind mount a volume. |
-| `-w`, `--workdir` | `string` | | Working directory inside the container |
+| Name | Type | Default | Description |
+|:------------------------|:--------------|:---------|:---------------------------------------------------------------------------------|
+| `--build` | `bool` | | Build image before starting container |
+| `--cap-add` | `list` | | Add Linux capabilities |
+| `--cap-drop` | `list` | | Drop Linux capabilities |
+| `-d`, `--detach` | `bool` | | Run container in background and print container ID |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--entrypoint` | `string` | | Override the entrypoint of the image |
+| `-e`, `--env` | `stringArray` | | Set environment variables |
+| `--env-from-file` | `stringArray` | | Set environment variables from file |
+| `-i`, `--interactive` | `bool` | `true` | Keep STDIN open even if not attached |
+| `-l`, `--label` | `stringArray` | | Add or override a label |
+| `--name` | `string` | | Assign a name to the container |
+| `-T`, `--no-TTY` | `bool` | `true` | Disable pseudo-TTY allocation (default: auto-detected) |
+| `--no-deps` | `bool` | | Don't start linked services |
+| `-p`, `--publish` | `stringArray` | | Publish a container's port(s) to the host |
+| `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never") |
+| `-q`, `--quiet` | `bool` | | Don't print anything to STDOUT |
+| `--quiet-build` | `bool` | | Suppress progress output from the build process |
+| `--quiet-pull` | `bool` | | Pull without printing progress information |
+| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
+| `--rm` | `bool` | | Automatically remove the container when it exits |
+| `-P`, `--service-ports` | `bool` | | Run command with all service's ports enabled and mapped to the host |
+| `--use-aliases` | `bool` | | Use the service's network useAliases in the network(s) the container connects to |
+| `-u`, `--user` | `string` | | Run as specified username or uid |
+| `-v`, `--volume` | `stringArray` | | Bind mount a volume |
+| `-w`, `--workdir` | `string` | | Working directory inside the container |
@@ -31,7 +92,7 @@ Run a one-off command on a service.
Runs a one-time command against a service.
-the following command starts the `web` service and runs `bash` as its command:
+The following command starts the `web` service and runs `bash` as its command:
```console
$ docker compose run web bash
diff --git a/docs/reference/compose_scale.md b/docs/reference/compose_scale.md
new file mode 100644
index 00000000000..3d0dbdb04a2
--- /dev/null
+++ b/docs/reference/compose_scale.md
@@ -0,0 +1,15 @@
+# docker compose scale
+
+
+Scale services
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--no-deps` | `bool` | | Don't start linked services |
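+
+A usage sketch, assuming a service named `web` is declared in the Compose file:
+
+```console
+$ docker compose scale web=3
+```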
+
+
+
+
diff --git a/docs/reference/compose_start.md b/docs/reference/compose_start.md
index 8f8cb5b480d..08db7ef2135 100644
--- a/docs/reference/compose_start.md
+++ b/docs/reference/compose_start.md
@@ -1,11 +1,17 @@
# docker compose start
-Start services
+Starts existing containers for a service
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
## Description
-Starts existing containers for a service.
+Starts existing containers for a service
diff --git a/docs/reference/compose_stats.md b/docs/reference/compose_stats.md
new file mode 100644
index 00000000000..78d44b89350
--- /dev/null
+++ b/docs/reference/compose_stats.md
@@ -0,0 +1,18 @@
+# docker compose stats
+
+
+Display a live stream of container(s) resource usage statistics
+
+### Options
+
+| Name | Type | Default | Description |
+|:--------------|:---------|:--------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-a`, `--all` | `bool` | | Show all containers (default shows just running) |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--format` | `string` | | Format output using a custom template:<br>'table': Print output in table format with column headers (default)<br>'table TEMPLATE': Print output in table format using the given Go template<br>'json': Print in JSON format<br>'TEMPLATE': Print output using the given Go template.<br>Refer to https://docs.docker.com/engine/cli/formatting/ for more information about formatting output with templates |
+| `--no-stream` | `bool` | | Disable streaming stats and only pull the first result |
+| `--no-trunc` | `bool` | | Do not truncate output |
+
+
+
+
diff --git a/docs/reference/compose_stop.md b/docs/reference/compose_stop.md
index 9d8ead8236c..fe84f24f8f5 100644
--- a/docs/reference/compose_stop.md
+++ b/docs/reference/compose_stop.md
@@ -1,13 +1,14 @@
# docker compose stop
-Stop services
+Stops running containers without removing them. They can be started again with `docker compose start`.
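+
+A usage sketch (`web` is an assumed service name; `-t` is described in the options below):
+
+```console
+$ docker compose stop
+$ docker compose stop -t 30 web
+```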
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
+| Name | Type | Default | Description |
+|:------------------|:-------|:--------|:--------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
diff --git a/docs/reference/compose_top.md b/docs/reference/compose_top.md
index eee5a3cab8d..eeacb3866aa 100644
--- a/docs/reference/compose_top.md
+++ b/docs/reference/compose_top.md
@@ -1,14 +1,20 @@
# docker compose top
-Display the running processes
+Displays the running processes
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
## Description
-Displays the running processes.
+Displays the running processes
## Examples
diff --git a/docs/reference/compose_unpause.md b/docs/reference/compose_unpause.md
index e819c0c4f27..92841ceade7 100644
--- a/docs/reference/compose_unpause.md
+++ b/docs/reference/compose_unpause.md
@@ -1,11 +1,17 @@
# docker compose unpause
-Unpause services
+Unpauses paused containers of a service
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:--------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
## Description
-Unpauses paused containers of a service.
+Unpauses paused containers of a service
diff --git a/docs/reference/compose_up.md b/docs/reference/compose_up.md
index 1a110f3f956..b7f17a0fac9 100644
--- a/docs/reference/compose_up.md
+++ b/docs/reference/compose_up.md
@@ -1,32 +1,60 @@
# docker compose up
-Create and start containers
+Builds, (re)creates, starts, and attaches to containers for a service.
+
+Unless they are already running, this command also starts any linked services.
+
+The `docker compose up` command aggregates the output of each container (like `docker compose logs --follow` does).
+You can optionally select a subset of services to attach to using the `--attach` flag, or exclude some services using
+`--no-attach` to prevent the output from being flooded by verbose services.
+
+When the command exits, all containers are stopped. Running `docker compose up --detach` starts the containers in the
+background and leaves them running.
+
+If there are existing containers for a service, and the service’s configuration or image was changed after the
+container’s creation, `docker compose up` picks up the changes by stopping and recreating the containers
+(preserving mounted volumes). To prevent Compose from picking up changes, use the `--no-recreate` flag.
+
+If you want to force Compose to stop and recreate all containers, use the `--force-recreate` flag.
+
+If the process encounters an error, the exit code for this command is `1`.
+If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the containers are stopped, and the exit code is `0`.
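+
+For example, a sketch that keeps a verbose service out of the aggregated output (`db` is an assumed service name):
+
+```console
+$ docker compose up --no-attach db
+```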
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `--abort-on-container-exit` | | | Stops all containers if any container was stopped. Incompatible with -d |
-| `--always-recreate-deps` | | | Recreate dependent containers. Incompatible with --no-recreate. |
-| `--attach` | `stringArray` | | Attach to service output. |
-| `--attach-dependencies` | | | Attach to dependent containers. |
-| `--build` | | | Build images before starting containers. |
-| `-d`, `--detach` | | | Detached mode: Run containers in the background |
-| `--exit-code-from` | `string` | | Return the exit code of the selected service container. Implies --abort-on-container-exit |
-| `--force-recreate` | | | Recreate containers even if their configuration and image haven't changed. |
-| `--no-build` | | | Don't build an image, even if it's missing. |
-| `--no-color` | | | Produce monochrome output. |
-| `--no-deps` | | | Don't start linked services. |
-| `--no-log-prefix` | | | Don't print prefix in logs. |
-| `--no-recreate` | | | If containers already exist, don't recreate them. Incompatible with --force-recreate. |
-| `--no-start` | | | Don't start the services after creating them. |
-| `--quiet-pull` | | | Pull without printing progress information. |
-| `--remove-orphans` | | | Remove containers for services not defined in the Compose file. |
-| `-V`, `--renew-anon-volumes` | | | Recreate anonymous volumes instead of retrieving data from the previous containers. |
-| `--scale` | `stringArray` | | Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. |
-| `-t`, `--timeout` | `int` | `10` | Use this timeout in seconds for container shutdown when attached or when containers are already running. |
-| `--wait` | | | Wait for services to be running\|healthy. Implies detached mode. |
+| Name | Type | Default | Description |
+|:-------------------------------|:--------------|:---------|:----------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--abort-on-container-exit` | `bool` | | Stops all containers if any container was stopped. Incompatible with -d |
+| `--abort-on-container-failure` | `bool` | | Stops all containers if any container exited with failure. Incompatible with -d |
+| `--always-recreate-deps` | `bool` | | Recreate dependent containers. Incompatible with --no-recreate. |
+| `--attach` | `stringArray` | | Restrict attaching to the specified services. Incompatible with --attach-dependencies. |
+| `--attach-dependencies` | `bool` | | Automatically attach to log output of dependent services |
+| `--build` | `bool` | | Build images before starting containers |
+| `-d`, `--detach` | `bool` | | Detached mode: Run containers in the background |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--exit-code-from` | `string` | | Return the exit code of the selected service container. Implies --abort-on-container-exit |
+| `--force-recreate` | `bool` | | Recreate containers even if their configuration and image haven't changed |
+| `--menu`                       | `bool`        |          | Enable interactive shortcuts when running attached. Incompatible with --detach. Can also be enabled or disabled by setting the COMPOSE_MENU environment variable. |
+| `--no-attach` | `stringArray` | | Do not attach (stream logs) to the specified services |
+| `--no-build`                   | `bool`        |          | Don't build an image, even if it's missing |
+| `--no-color` | `bool` | | Produce monochrome output |
+| `--no-deps` | `bool` | | Don't start linked services |
+| `--no-log-prefix` | `bool` | | Don't print prefix in logs |
+| `--no-recreate` | `bool` | | If containers already exist, don't recreate them. Incompatible with --force-recreate. |
+| `--no-start` | `bool` | | Don't start the services after creating them |
+| `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never") |
+| `--quiet-build` | `bool` | | Suppress the build output |
+| `--quiet-pull` | `bool` | | Pull without printing progress information |
+| `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file |
+| `-V`, `--renew-anon-volumes` | `bool` | | Recreate anonymous volumes instead of retrieving data from the previous containers |
+| `--scale` | `stringArray` | | Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. |
+| `-t`, `--timeout` | `int` | `0` | Use this timeout in seconds for container shutdown when attached or when containers are already running |
+| `--timestamps` | `bool` | | Show timestamps |
+| `--wait` | `bool` | | Wait for services to be running\|healthy. Implies detached mode. |
+| `--wait-timeout` | `int` | `0` | Maximum duration in seconds to wait for the project to be running\|healthy |
+| `-w`, `--watch` | `bool` | | Watch source code and rebuild/refresh containers when files are updated. |
+| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively |
@@ -38,6 +66,9 @@ Builds, (re)creates, starts, and attaches to containers for a service.
Unless they are already running, this command also starts any linked services.
The `docker compose up` command aggregates the output of each container (like `docker compose logs --follow` does).
+You can optionally select a subset of services to attach to using the `--attach` flag, or exclude some services using
+`--no-attach` to prevent the output from being flooded by verbose services.
+
When the command exits, all containers are stopped. Running `docker compose up --detach` starts the containers in the
background and leaves them running.
diff --git a/docs/reference/compose_version.md b/docs/reference/compose_version.md
index 94b2d36dcd0..3a6329dadb4 100644
--- a/docs/reference/compose_version.md
+++ b/docs/reference/compose_version.md
@@ -5,10 +5,11 @@ Show the Docker Compose version information
### Options
-| Name | Type | Default | Description |
-| --- | --- | --- | --- |
-| `-f`, `--format` | `string` | | Format the output. Values: [pretty \| json]. (Default: pretty) |
-| `--short` | | | Shows only Compose's version number. |
+| Name | Type | Default | Description |
+|:-----------------|:---------|:--------|:---------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `-f`, `--format` | `string` | | Format the output. Values: [pretty \| json]. (Default: pretty) |
+| `--short` | `bool` | | Shows only Compose's version number |
diff --git a/docs/reference/compose_volumes.md b/docs/reference/compose_volumes.md
new file mode 100644
index 00000000000..6bad874f187
--- /dev/null
+++ b/docs/reference/compose_volumes.md
@@ -0,0 +1,16 @@
+# docker compose volumes
+
+
+List volumes
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--format` | `string` | `table` | Format output using a custom template:<br>'table': Print output in table format with column headers (default)<br>'table TEMPLATE': Print output in table format using the given Go template<br>'json': Print in JSON format<br>'TEMPLATE': Print output using the given Go template.<br>Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates |
+| `-q`, `--quiet` | `bool` | | Only display volume names |
+
+
+
+
diff --git a/docs/reference/compose_wait.md b/docs/reference/compose_wait.md
new file mode 100644
index 00000000000..59474c9b509
--- /dev/null
+++ b/docs/reference/compose_wait.md
@@ -0,0 +1,15 @@
+# docker compose wait
+
+
+Block until containers of all (or specified) services stop.
+
+### Options
+
+| Name | Type | Default | Description |
+|:-----------------|:-------|:--------|:---------------------------------------------|
+| `--down-project` | `bool` | | Drops project when the first container stops |
+| `--dry-run` | `bool` | | Execute command in dry run mode |
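+
+A usage sketch, assuming services named `backend` and `db` were started in detached mode:
+
+```console
+$ docker compose up --detach
+$ docker compose wait backend db
+```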
+
+
+
+
diff --git a/docs/reference/compose_watch.md b/docs/reference/compose_watch.md
new file mode 100644
index 00000000000..f6040c9094f
--- /dev/null
+++ b/docs/reference/compose_watch.md
@@ -0,0 +1,17 @@
+# docker compose watch
+
+
+Watch build context for service and rebuild/refresh containers when files are updated
+
+### Options
+
+| Name | Type | Default | Description |
+|:------------|:-------|:--------|:----------------------------------------------|
+| `--dry-run` | `bool` | | Execute command in dry run mode |
+| `--no-up` | `bool` | | Do not build & start services before watching |
+| `--prune` | `bool` | `true` | Prune dangling images on rebuild |
+| `--quiet`   | `bool` |         | Hide build output                              |
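+
+A minimal invocation sketch; this assumes the services to watch declare a `develop.watch` configuration in the Compose file:
+
+```console
+$ docker compose watch
+```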
+
+
+
+
diff --git a/docs/reference/docker_compose.yaml b/docs/reference/docker_compose.yaml
index 2cf5cde6cc2..c5fdb937510 100644
--- a/docs/reference/docker_compose.yaml
+++ b/docs/reference/docker_compose.yaml
@@ -1,278 +1,443 @@
command: docker compose
short: Docker Compose
-long: |-
- You can use compose subcommand, `docker compose [-f ...] [options] [COMMAND] [ARGS...]`, to build and manage
- multiple services in Docker containers.
+long: Define and run multi-container applications with Docker
+usage: docker compose
+pname: docker
+plink: docker.yaml
+cname:
+ - docker compose attach
+ - docker compose bridge
+ - docker compose build
+ - docker compose commit
+ - docker compose config
+ - docker compose cp
+ - docker compose create
+ - docker compose down
+ - docker compose events
+ - docker compose exec
+ - docker compose export
+ - docker compose images
+ - docker compose kill
+ - docker compose logs
+ - docker compose ls
+ - docker compose pause
+ - docker compose port
+ - docker compose ps
+ - docker compose publish
+ - docker compose pull
+ - docker compose push
+ - docker compose restart
+ - docker compose rm
+ - docker compose run
+ - docker compose scale
+ - docker compose start
+ - docker compose stats
+ - docker compose stop
+ - docker compose top
+ - docker compose unpause
+ - docker compose up
+ - docker compose version
+ - docker compose volumes
+ - docker compose wait
+ - docker compose watch
+clink:
+ - docker_compose_attach.yaml
+ - docker_compose_bridge.yaml
+ - docker_compose_build.yaml
+ - docker_compose_commit.yaml
+ - docker_compose_config.yaml
+ - docker_compose_cp.yaml
+ - docker_compose_create.yaml
+ - docker_compose_down.yaml
+ - docker_compose_events.yaml
+ - docker_compose_exec.yaml
+ - docker_compose_export.yaml
+ - docker_compose_images.yaml
+ - docker_compose_kill.yaml
+ - docker_compose_logs.yaml
+ - docker_compose_ls.yaml
+ - docker_compose_pause.yaml
+ - docker_compose_port.yaml
+ - docker_compose_ps.yaml
+ - docker_compose_publish.yaml
+ - docker_compose_pull.yaml
+ - docker_compose_push.yaml
+ - docker_compose_restart.yaml
+ - docker_compose_rm.yaml
+ - docker_compose_run.yaml
+ - docker_compose_scale.yaml
+ - docker_compose_start.yaml
+ - docker_compose_stats.yaml
+ - docker_compose_stop.yaml
+ - docker_compose_top.yaml
+ - docker_compose_unpause.yaml
+ - docker_compose_up.yaml
+ - docker_compose_version.yaml
+ - docker_compose_volumes.yaml
+ - docker_compose_wait.yaml
+ - docker_compose_watch.yaml
+options:
+ - option: all-resources
+ value_type: bool
+ default_value: "false"
+ description: Include all resources, even those not used by services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: ansi
+ value_type: string
+ default_value: auto
+ description: |
+ Control when to print ANSI control characters ("never"|"always"|"auto")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: compatibility
+ value_type: bool
+ default_value: "false"
+ description: Run compose in backward compatibility mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: env-file
+ value_type: stringArray
+ default_value: '[]'
+ description: Specify an alternate environment file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: file
+ shorthand: f
+ value_type: stringArray
+ default_value: '[]'
+ description: Compose configuration files
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: insecure-registry
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Use insecure registry to pull Compose OCI artifacts. Doesn't apply to images
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-ansi
+ value_type: bool
+ default_value: "false"
+ description: Do not print ANSI control characters (DEPRECATED)
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: parallel
+ value_type: int
+ default_value: "-1"
+ description: Control max parallelism, -1 for unlimited
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: profile
+ value_type: stringArray
+ default_value: '[]'
+ description: Specify a profile to enable
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: progress
+ value_type: string
+ description: Set type of progress output (auto, tty, plain, json, quiet)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: project-directory
+ value_type: string
+ description: |-
+ Specify an alternate working directory
+ (default: the path of the, first specified, Compose file)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: project-name
+ shorthand: p
+ value_type: string
+ description: Project name
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: verbose
+ value_type: bool
+ default_value: "false"
+ description: Show more output
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: version
+ shorthand: v
+ value_type: bool
+ default_value: "false"
+ description: Show the Docker Compose version information
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: workdir
+ value_type: string
+ description: |-
+ DEPRECATED! USE --project-directory INSTEAD.
+ Specify an alternate working directory
+ (default: the path of the, first specified, Compose file)
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+examples: |-
+ ### Use `-f` to specify the name and path of one or more Compose files
+ Use the `-f` flag to specify the location of a Compose [configuration file](/reference/compose-file/).
- ### Use `-f` to specify name and path of one or more Compose files
- Use the `-f` flag to specify the location of a Compose configuration file.
+ #### Specifying multiple Compose files
+ You can supply multiple `-f` configuration files. When you supply multiple files, Compose combines them into a single
+ configuration. Compose builds the configuration in the order you supply the files. Subsequent files override and add
+ to their predecessors.
- #### Specifying multiple Compose files
- You can supply multiple `-f` configuration files. When you supply multiple files, Compose combines them into a single
- configuration. Compose builds the configuration in the order you supply the files. Subsequent files override and add
- to their predecessors.
+ For example, consider this command line:
- For example, consider this command line:
+ ```console
+ $ docker compose -f compose.yaml -f compose.admin.yaml run backup_db
+ ```
- ```console
- $ docker compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
- ```
+ The `compose.yaml` file might specify a `webapp` service.
- The `docker-compose.yml` file might specify a `webapp` service.
+ ```yaml
+ services:
+ webapp:
+ image: examples/web
+ ports:
+ - "8000:8000"
+ volumes:
+ - "/data"
+ ```
+  If the `compose.admin.yaml` also specifies this same service, any matching fields override the previous file.
+  New values add to the `webapp` service configuration.
- ```yaml
- services:
- webapp:
- image: examples/web
- ports:
- - "8000:8000"
- volumes:
- - "/data"
- ```
- If the `docker-compose.admin.yml` also specifies this same service, any matching fields override the previous file.
- New values, add to the `webapp` service configuration.
+ ```yaml
+ services:
+ webapp:
+ build: .
+ environment:
+ - DEBUG=1
+ ```
- ```yaml
- services:
- webapp:
- build: .
- environment:
- - DEBUG=1
- ```
+ When you use multiple Compose files, all paths in the files are relative to the first configuration file specified
+ with `-f`. You can use the `--project-directory` option to override this base path.
- When you use multiple Compose files, all paths in the files are relative to the first configuration file specified
- with `-f`. You can use the `--project-directory` option to override this base path.
+ Use a `-f` with `-` (dash) as the filename to read the configuration from stdin. When stdin is used all paths in the
+ configuration are relative to the current working directory.
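+
+  A sketch of reading the configuration from stdin:
+
+  ```console
+  $ cat compose.yaml | docker compose -f - up
+  ```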
- Use a `-f` with `-` (dash) as the filename to read the configuration from stdin. When stdin is used all paths in the
- configuration are relative to the current working directory.
+ The `-f` flag is optional. If you don’t provide this flag on the command line, Compose traverses the working directory
+ and its parent directories looking for a `compose.yaml` or `docker-compose.yaml` file.
- The `-f` flag is optional. If you don’t provide this flag on the command line, Compose traverses the working directory
- and its parent directories looking for a `compose.yaml` or `docker-compose.yaml` file.
+ #### Specifying a path to a single Compose file
+ You can use the `-f` flag to specify a path to a Compose file that is not located in the current directory, either
+ from the command line or by setting up a `COMPOSE_FILE` environment variable in your shell or in an environment file.
- #### Specifying a path to a single Compose file
- You can use the `-f` flag to specify a path to a Compose file that is not located in the current directory, either
- from the command line or by setting up a `COMPOSE_FILE` environment variable in your shell or in an environment file.
+ For an example of using the `-f` option at the command line, suppose you are running the Compose Rails sample, and
+ have a `compose.yaml` file in a directory called `sandbox/rails`. You can use a command like `docker compose pull` to
+ get the postgres image for the db service from anywhere by using the `-f` flag as follows:
- For an example of using the `-f` option at the command line, suppose you are running the Compose Rails sample, and
- have a `compose.yaml` file in a directory called `sandbox/rails`. You can use a command like `docker compose pull` to
- get the postgres image for the db service from anywhere by using the `-f` flag as follows:
+ ```console
+ $ docker compose -f ~/sandbox/rails/compose.yaml pull db
+ ```
- ```console
- $ docker compose -f ~/sandbox/rails/compose.yaml pull db
- ```
+ #### Using an OCI published artifact
+ You can use the `-f` flag with the `oci://` prefix to reference a Compose file that has been published to an OCI registry.
+ This allows you to distribute and version your Compose configurations as OCI artifacts.
- ### Use `-p` to specify a project name
+ To use a Compose file from an OCI registry:
- Each configuration has a project name. If you supply a `-p` flag, you can specify a project name. If you don’t
- specify the flag, Compose uses the current directory name.
- Project name can also be set by `COMPOSE_PROJECT_NAME` environment variable.
+ ```console
+ $ docker compose -f oci://registry.example.com/my-compose-project:latest up
+ ```
- Most compose subcommand can be ran without a compose file, just passing
- project name to retrieve the relevant resources.
+ You can also combine OCI artifacts with local files:
- ```console
- $ docker compose -p my_project ps -a
- NAME SERVICE STATUS PORTS
- my_project_demo_1 demo running
+ ```console
+ $ docker compose -f oci://registry.example.com/my-compose-project:v1.0 -f compose.override.yaml up
+ ```
- $ docker compose -p my_project logs
- demo_1 | PING localhost (127.0.0.1): 56 data bytes
- demo_1 | 64 bytes from 127.0.0.1: seq=0 ttl=64 time=0.095 ms
- ```
+ The OCI artifact must contain a valid Compose file. You can publish Compose files to an OCI registry using the
+ `docker compose publish` command.
- ### Use profiles to enable optional services
+ #### Using a git repository
+ You can use the `-f` flag to reference a Compose file from a git repository. Compose supports various git URL formats:
- Use `--profile` to specify one or more active profiles
- Calling `docker compose --profile frontend up` will start the services with the profile `frontend` and services
- without any specified profiles.
- You can also enable multiple profiles, e.g. with `docker compose --profile frontend --profile debug up` the profiles `frontend` and `debug` will be enabled.
+ Using HTTPS:
+ ```console
+ $ docker compose -f https://github.com/user/repo.git up
+ ```
- Profiles can also be set by `COMPOSE_PROFILES` environment variable.
+ Using SSH:
+ ```console
+ $ docker compose -f git@github.com:user/repo.git up
+ ```
- ### Set up environment variables
+ You can specify a specific branch, tag, or commit:
+ ```console
+ $ docker compose -f https://github.com/user/repo.git@main up
+ $ docker compose -f https://github.com/user/repo.git@v1.0.0 up
+ $ docker compose -f https://github.com/user/repo.git@abc123 up
+ ```
- You can set environment variables for various docker compose options, including the `-f`, `-p` and `--profiles` flags.
+ You can also specify a subdirectory within the repository:
+ ```console
+ $ docker compose -f https://github.com/user/repo.git#main:path/to/compose.yaml up
+ ```
- Setting the `COMPOSE_FILE` environment variable is equivalent to passing the `-f` flag,
- `COMPOSE_PROJECT_NAME` environment variable does the same for to the `-p` flag,
- and so does `COMPOSE_PROFILES` environment variable for to the `--profiles` flag.
+  When using git resources, Compose clones the repository and uses the specified Compose file. You can combine
+  git resources with local files:
- If flags are explicitly set on command line, associated environment variable is ignored
+ ```console
+ $ docker compose -f https://github.com/user/repo.git -f compose.override.yaml up
+ ```
- Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` will stop docker compose from detecting orphaned
- containers for the project.
-usage: docker compose
-pname: docker
-plink: docker.yaml
-cname:
-- docker compose build
-- docker compose convert
-- docker compose cp
-- docker compose create
-- docker compose down
-- docker compose events
-- docker compose exec
-- docker compose images
-- docker compose kill
-- docker compose logs
-- docker compose ls
-- docker compose pause
-- docker compose port
-- docker compose ps
-- docker compose pull
-- docker compose push
-- docker compose restart
-- docker compose rm
-- docker compose run
-- docker compose start
-- docker compose stop
-- docker compose top
-- docker compose unpause
-- docker compose up
-- docker compose version
-clink:
-- docker_compose_build.yaml
-- docker_compose_convert.yaml
-- docker_compose_cp.yaml
-- docker_compose_create.yaml
-- docker_compose_down.yaml
-- docker_compose_events.yaml
-- docker_compose_exec.yaml
-- docker_compose_images.yaml
-- docker_compose_kill.yaml
-- docker_compose_logs.yaml
-- docker_compose_ls.yaml
-- docker_compose_pause.yaml
-- docker_compose_port.yaml
-- docker_compose_ps.yaml
-- docker_compose_pull.yaml
-- docker_compose_push.yaml
-- docker_compose_restart.yaml
-- docker_compose_rm.yaml
-- docker_compose_run.yaml
-- docker_compose_start.yaml
-- docker_compose_stop.yaml
-- docker_compose_top.yaml
-- docker_compose_unpause.yaml
-- docker_compose_up.yaml
-- docker_compose_version.yaml
-options:
-- option: ansi
- value_type: string
- default_value: auto
- description: |
- Control when to print ANSI control characters ("never"|"always"|"auto")
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: compatibility
- value_type: bool
- default_value: "false"
- description: Run compose in backward compatibility mode
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: env-file
- value_type: string
- description: Specify an alternate environment file.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: file
- shorthand: f
- value_type: stringArray
- default_value: '[]'
- description: Compose configuration files
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-ansi
- value_type: bool
- default_value: "false"
- description: Do not print ANSI control characters (DEPRECATED)
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: profile
- value_type: stringArray
- default_value: '[]'
- description: Specify a profile to enable
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: project-directory
- value_type: string
- description: |-
- Specify an alternate working directory
- (default: the path of the Compose file)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: project-name
- shorthand: p
- value_type: string
- description: Project name
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: verbose
- value_type: bool
- default_value: "false"
- description: Show more output
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: version
- shorthand: v
- value_type: bool
- default_value: "false"
- description: Show the Docker Compose version information
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: workdir
- value_type: string
- description: |-
- DEPRECATED! USE --project-directory INSTEAD.
- Specify an alternate working directory
- (default: the path of the Compose file)
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ ### Use `-p` to specify a project name
+
+ Each configuration has a project name. Compose sets the project name using
+ the following mechanisms, in order of precedence:
+ - The `-p` command line flag
+ - The `COMPOSE_PROJECT_NAME` environment variable
+ - The top level `name:` variable from the config file (or the last `name:`
+ from a series of config files specified using `-f`)
+ - The `basename` of the project directory containing the config file (or
+ containing the first config file specified using `-f`)
+  - The `basename` of the current directory if no config file is specified
+
+  Project names must contain only lowercase letters, decimal digits, dashes,
+ and underscores, and must begin with a lowercase letter or decimal digit. If
+ the `basename` of the project directory or current directory violates this
+ constraint, you must use one of the other mechanisms.
+
+ ```console
+ $ docker compose -p my_project ps -a
+ NAME SERVICE STATUS PORTS
+ my_project_demo_1 demo running
+
+ $ docker compose -p my_project logs
+ demo_1 | PING localhost (127.0.0.1): 56 data bytes
+ demo_1 | 64 bytes from 127.0.0.1: seq=0 ttl=64 time=0.095 ms
+ ```
+
+ ### Use profiles to enable optional services
+
+  Use `--profile` to specify one or more active profiles.
+  Calling `docker compose --profile frontend up` starts the services with the profile `frontend` and services
+  without any specified profiles.
+  You can also enable multiple profiles; for example, with `docker compose --profile frontend --profile debug up` both the `frontend` and `debug` profiles are enabled.
+
+ Profiles can also be set by `COMPOSE_PROFILES` environment variable.
+
+ ### Configuring parallelism
+
+ Use `--parallel` to specify the maximum level of parallelism for concurrent engine calls.
+ Calling `docker compose --parallel 1 pull` pulls the pullable images defined in the Compose file
+ one at a time. This can also be used to control build concurrency.
+
+ Parallelism can also be set by the `COMPOSE_PARALLEL_LIMIT` environment variable.
+
+ ### Set up environment variables
+
+ You can set environment variables for various docker compose options, including the `-f`, `-p` and `--profiles` flags.
+
+  Setting the `COMPOSE_FILE` environment variable is equivalent to passing the `-f` flag,
+  the `COMPOSE_PROJECT_NAME` environment variable does the same as the `-p` flag,
+  the `COMPOSE_PROFILES` environment variable is equivalent to the `--profiles` flag,
+  and `COMPOSE_PARALLEL_LIMIT` does the same as the `--parallel` flag.
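+
+  For example, the following sketch (the file and project names are placeholders) is equivalent to passing `-f` and `-p` explicitly:
+
+  ```console
+  $ COMPOSE_FILE=compose.yaml COMPOSE_PROJECT_NAME=my_project docker compose ps
+  ```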
+
+ If flags are explicitly set on the command line, the associated environment variable is ignored.
+
+ Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` stops docker compose from detecting orphaned
+ containers for the project.
+
+ Setting the `COMPOSE_MENU` environment variable to `false` disables the helper menu when running `docker compose up`
+ in attached mode. Alternatively, you can also run `docker compose up --menu=false` to disable the helper menu.
+
+ ### Use Dry Run mode to test your command
+
+ Use `--dry-run` flag to test a command without changing your application stack state.
+ Dry Run mode shows you all the steps Compose applies when executing a command, for example:
+ ```console
+ $ docker compose --dry-run up --build -d
+ [+] Pulling 1/1
+ ✔ DRY-RUN MODE - db Pulled 0.9s
+ [+] Running 10/8
+ ✔ DRY-RUN MODE - build service backend 0.0s
+ ✔ DRY-RUN MODE - ==> ==> writing image dryRun-754a08ddf8bcb1cf22f310f09206dd783d42f7dd 0.0s
+ ✔ DRY-RUN MODE - ==> ==> naming to nginx-golang-mysql-backend 0.0s
+ ✔ DRY-RUN MODE - Network nginx-golang-mysql_default Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-db-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-backend-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-proxy-1 Created 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-db-1 Healthy 0.5s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-backend-1 Started 0.0s
+ ✔ DRY-RUN MODE - Container nginx-golang-mysql-proxy-1 Started Started
+ ```
+ From the example above, you can see that the first step is to pull the image defined by `db` service, then build the `backend` service.
+ Next, the containers are created. The `db` service is started, and the `backend` and `proxy` wait until the `db` service is healthy before starting.
+
+  Dry Run mode works with almost all commands. You cannot use Dry Run mode with commands that don't change the state of a Compose stack, such as `ps`, `ls`, or `logs`.
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_alpha.yaml b/docs/reference/docker_compose_alpha.yaml
new file mode 100644
index 00000000000..e6b6b6e6b6f
--- /dev/null
+++ b/docs/reference/docker_compose_alpha.yaml
@@ -0,0 +1,31 @@
+command: docker compose alpha
+short: Experimental commands
+long: Experimental commands
+pname: docker compose
+plink: docker_compose.yaml
+cname:
+ - docker compose alpha generate
+ - docker compose alpha publish
+ - docker compose alpha viz
+clink:
+ - docker_compose_alpha_generate.yaml
+ - docker_compose_alpha_publish.yaml
+ - docker_compose_alpha_viz.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: true
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_dry-run.yaml b/docs/reference/docker_compose_alpha_dry-run.yaml
new file mode 100644
index 00000000000..d489d39aeba
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_dry-run.yaml
@@ -0,0 +1,14 @@
+command: docker compose alpha dry-run
+short: |
+  EXPERIMENTAL - The dry run command allows you to test a command without applying changes
+long: |
+  EXPERIMENTAL - The dry run command allows you to test a command without applying changes
+usage: docker compose alpha dry-run -- [COMMAND...]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+deprecated: false
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_generate.yaml b/docs/reference/docker_compose_alpha_generate.yaml
new file mode 100644
index 00000000000..f31429c2d72
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_generate.yaml
@@ -0,0 +1,53 @@
+command: docker compose alpha generate
+short: EXPERIMENTAL - Generate a Compose file from existing containers
+long: EXPERIMENTAL - Generate a Compose file from existing containers
+usage: docker compose alpha generate [OPTIONS] [CONTAINERS...]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+options:
+ - option: format
+ value_type: string
+ default_value: yaml
+ description: 'Format the output. Values: [yaml | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: name
+ value_type: string
+ description: Project name to set in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: project-dir
+ value_type: string
+ description: Directory to use for the project
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: true
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_publish.yaml b/docs/reference/docker_compose_alpha_publish.yaml
new file mode 100644
index 00000000000..9059cbf4869
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_publish.yaml
@@ -0,0 +1,86 @@
+command: docker compose alpha publish
+short: Publish compose application
+long: Publish compose application
+usage: docker compose alpha publish [OPTIONS] REPOSITORY[:TAG]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+options:
+ - option: app
+ value_type: bool
+ default_value: "false"
+ description: Publish the compose application (includes referenced images)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: insecure-registry
+ value_type: bool
+ default_value: "false"
+ description: Use insecure registry
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: oci-version
+ value_type: string
+ description: |
+ OCI image/artifact specification version (automatically determined by default)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: resolve-image-digests
+ value_type: bool
+ default_value: "false"
+ description: Pin image tags to digests
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: with-env
+ value_type: bool
+ default_value: "false"
+ description: Include environment variables in the published OCI artifact
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: "yes"
+ shorthand: "y"
+ value_type: bool
+ default_value: "false"
+ description: Assume "yes" as answer to all prompts
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: true
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_scale.yaml b/docs/reference/docker_compose_alpha_scale.yaml
new file mode 100644
index 00000000000..cc381493fa3
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_scale.yaml
@@ -0,0 +1,35 @@
+command: docker compose alpha scale
+short: Scale services
+long: Scale services
+usage: docker compose alpha scale [SERVICE=REPLICAS...]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+options:
+ - option: no-deps
+ value_type: bool
+ default_value: "false"
+ description: Don't start linked services.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_viz.yaml b/docs/reference/docker_compose_alpha_viz.yaml
new file mode 100644
index 00000000000..c07475caac8
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_viz.yaml
@@ -0,0 +1,77 @@
+command: docker compose alpha viz
+short: EXPERIMENTAL - Generate a graphviz graph from your compose file
+long: EXPERIMENTAL - Generate a graphviz graph from your compose file
+usage: docker compose alpha viz [OPTIONS]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+options:
+ - option: image
+ value_type: bool
+ default_value: "false"
+ description: Include service's image name in output graph
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: indentation-size
+ value_type: int
+ default_value: "1"
+ description: Number of tabs or spaces to use for indentation
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: networks
+ value_type: bool
+ default_value: "false"
+ description: Include service's attached networks in output graph
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: ports
+ value_type: bool
+ default_value: "false"
+ description: Include service's exposed ports in output graph
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: spaces
+ value_type: bool
+ default_value: "false"
+ description: |-
+ If given, the space character ' ' is used for indentation,
+ otherwise the tab character '\t' is used
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: true
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_alpha_watch.yaml b/docs/reference/docker_compose_alpha_watch.yaml
new file mode 100644
index 00000000000..e8a7d9845ab
--- /dev/null
+++ b/docs/reference/docker_compose_alpha_watch.yaml
@@ -0,0 +1,47 @@
+command: docker compose alpha watch
+short: |
+ Watch build context for service and rebuild/refresh containers when files are updated
+long: |
+ Watch build context for service and rebuild/refresh containers when files are updated
+usage: docker compose alpha watch [SERVICE...]
+pname: docker compose alpha
+plink: docker_compose_alpha.yaml
+options:
+ - option: no-up
+ value_type: bool
+ default_value: "false"
+ description: Do not build & start services before watching
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ value_type: bool
+ default_value: "false"
+ description: Hide build output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: true
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_attach.yaml b/docs/reference/docker_compose_attach.yaml
new file mode 100644
index 00000000000..8fd6957ca18
--- /dev/null
+++ b/docs/reference/docker_compose_attach.yaml
@@ -0,0 +1,66 @@
+command: docker compose attach
+short: |
+ Attach local standard input, output, and error streams to a service's running container
+long: |
+ Attach local standard input, output, and error streams to a service's running container
+usage: docker compose attach [OPTIONS] SERVICE
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: detach-keys
+ value_type: string
+ description: Override the key sequence for detaching from a container.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+ description: Index of the container if service has multiple replicas.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-stdin
+ value_type: bool
+ default_value: "false"
+ description: Do not attach STDIN
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: sig-proxy
+ value_type: bool
+ default_value: "true"
+ description: Proxy all received signals to the process
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_bridge.yaml b/docs/reference/docker_compose_bridge.yaml
new file mode 100644
index 00000000000..5ef9ebf5585
--- /dev/null
+++ b/docs/reference/docker_compose_bridge.yaml
@@ -0,0 +1,29 @@
+command: docker compose bridge
+short: Convert compose files into another model
+long: Convert compose files into another model
+pname: docker compose
+plink: docker_compose.yaml
+cname:
+ - docker compose bridge convert
+ - docker compose bridge transformations
+clink:
+ - docker_compose_bridge_convert.yaml
+ - docker_compose_bridge_transformations.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_bridge_convert.yaml b/docs/reference/docker_compose_bridge_convert.yaml
new file mode 100644
index 00000000000..f55f0b233c3
--- /dev/null
+++ b/docs/reference/docker_compose_bridge_convert.yaml
@@ -0,0 +1,59 @@
+command: docker compose bridge convert
+short: |
+ Convert compose files to Kubernetes manifests, Helm charts, or another model
+long: |
+ Convert compose files to Kubernetes manifests, Helm charts, or another model
+usage: docker compose bridge convert
+pname: docker compose bridge
+plink: docker_compose_bridge.yaml
+options:
+ - option: output
+ shorthand: o
+ value_type: string
+ default_value: out
+ description: The output directory for the Kubernetes resources
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: templates
+ value_type: string
+ description: Directory containing transformation templates
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: transformation
+ shorthand: t
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Transformation to apply to compose model (default: docker/compose-bridge-kubernetes)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_bridge_transformations.yaml b/docs/reference/docker_compose_bridge_transformations.yaml
new file mode 100644
index 00000000000..2ab5661f0b2
--- /dev/null
+++ b/docs/reference/docker_compose_bridge_transformations.yaml
@@ -0,0 +1,29 @@
+command: docker compose bridge transformations
+short: Manage transformation images
+long: Manage transformation images
+pname: docker compose bridge
+plink: docker_compose_bridge.yaml
+cname:
+ - docker compose bridge transformations create
+ - docker compose bridge transformations list
+clink:
+ - docker_compose_bridge_transformations_create.yaml
+ - docker_compose_bridge_transformations_list.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_bridge_transformations_create.yaml b/docs/reference/docker_compose_bridge_transformations_create.yaml
new file mode 100644
index 00000000000..e8dd9e58a51
--- /dev/null
+++ b/docs/reference/docker_compose_bridge_transformations_create.yaml
@@ -0,0 +1,36 @@
+command: docker compose bridge transformations create
+short: Create a new transformation
+long: Create a new transformation
+usage: docker compose bridge transformations create [OPTION] PATH
+pname: docker compose bridge transformations
+plink: docker_compose_bridge_transformations.yaml
+options:
+ - option: from
+ shorthand: f
+ value_type: string
+ description: |
+ Existing transformation to copy (default: docker/compose-bridge-kubernetes)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_bridge_transformations_list.yaml b/docs/reference/docker_compose_bridge_transformations_list.yaml
new file mode 100644
index 00000000000..3afd3a84b8e
--- /dev/null
+++ b/docs/reference/docker_compose_bridge_transformations_list.yaml
@@ -0,0 +1,47 @@
+command: docker compose bridge transformations list
+aliases: docker compose bridge transformations list, docker compose bridge transformations ls
+short: List available transformations
+long: List available transformations
+usage: docker compose bridge transformations list
+pname: docker compose bridge transformations
+plink: docker_compose_bridge_transformations.yaml
+options:
+ - option: format
+ value_type: string
+ default_value: table
+ description: 'Format the output. Values: [table | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only display transformer names
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_build.yaml b/docs/reference/docker_compose_build.yaml
index 05c62bd341e..e645a40aac2 100644
--- a/docs/reference/docker_compose_build.yaml
+++ b/docs/reference/docker_compose_build.yaml
@@ -1,133 +1,212 @@
command: docker compose build
short: Build or rebuild services
long: |-
- Services are built once and then tagged, by default as `project_service`.
+ Services are built once and then tagged, by default as `project-service`.
- If the Compose file specifies an
- [image](https://github.com/compose-spec/compose-spec/blob/master/spec.md#image) name,
- the image is tagged with that name, substituting any variables beforehand. See
- [variable interpolation](https://github.com/compose-spec/compose-spec/blob/master/spec.md#interpolation).
+ If the Compose file specifies an
+ [image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name,
+ the image is tagged with that name, substituting any variables beforehand. See
+ [variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation).
- If you change a service's `Dockerfile` or the contents of its build directory,
- run `docker compose build` to rebuild it.
-usage: docker compose build [SERVICE...]
+ If you change a service's `Dockerfile` or the contents of its build directory,
+ run `docker compose build` to rebuild it.
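+
+ For example, to rebuild a single service without the cache and pass a build-time variable (a minimal sketch; the `web` service and `GIT_COMMIT` argument are hypothetical):
+
+ ```console
+ $ docker compose build --no-cache --build-arg GIT_COMMIT=abc1234 web
+ ```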
+usage: docker compose build [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: build-arg
- value_type: stringArray
- default_value: '[]'
- description: Set build-time variables for services.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: compress
- value_type: bool
- default_value: "true"
- description: Compress the build context using gzip. DEPRECATED
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: force-rm
- value_type: bool
- default_value: "true"
- description: Always remove intermediate containers. DEPRECATED
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: memory
- shorthand: m
- value_type: string
- description: |
- Set memory limit for the build container. Not supported on buildkit yet.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-cache
- value_type: bool
- default_value: "false"
- description: Do not use cache when building the image
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-rm
- value_type: bool
- default_value: "false"
- description: |
- Do not remove intermediate containers after a successful build. DEPRECATED
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: parallel
- value_type: bool
- default_value: "true"
- description: Build images in parallel. DEPRECATED
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: progress
- value_type: string
- default_value: auto
- description: Set type of progress output (auto, tty, plain, quiet)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: pull
- value_type: bool
- default_value: "false"
- description: Always attempt to pull a newer version of the image.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Don't print anything to STDOUT
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: ssh
- value_type: string
- description: |
- Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: build-arg
+ value_type: stringArray
+ default_value: '[]'
+ description: Set build-time variables for services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: builder
+ value_type: string
+ description: Set builder to use
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: check
+ value_type: bool
+ default_value: "false"
+ description: Check build configuration
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: compress
+ value_type: bool
+ default_value: "true"
+ description: Compress the build context using gzip. DEPRECATED
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: force-rm
+ value_type: bool
+ default_value: "true"
+ description: Always remove intermediate containers. DEPRECATED
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: memory
+ shorthand: m
+ value_type: bytes
+ default_value: "0"
+ description: |
+ Set memory limit for the build container. Not supported by BuildKit.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-cache
+ value_type: bool
+ default_value: "false"
+ description: Do not use cache when building the image
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-rm
+ value_type: bool
+ default_value: "false"
+ description: |
+ Do not remove intermediate containers after a successful build. DEPRECATED
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: parallel
+ value_type: bool
+ default_value: "true"
+ description: Build images in parallel. DEPRECATED
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: print
+ value_type: bool
+ default_value: "false"
+ description: Print equivalent bake file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: progress
+ value_type: string
+ description: Set type of ui output (auto, tty, plain, json, quiet)
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: provenance
+ value_type: string
+ description: Add a provenance attestation
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: pull
+ value_type: bool
+ default_value: "false"
+ description: Always attempt to pull a newer version of the image
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: push
+ value_type: bool
+ default_value: "false"
+ description: Push service images
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Suppress the build output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: sbom
+ value_type: string
+ description: Add an SBOM attestation
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: ssh
+ value_type: string
+ description: |
+ Set SSH authentications used when building service images (use 'default' to use your default SSH agent)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: with-dependencies
+ value_type: bool
+ default_value: "false"
+ description: Also build dependencies (transitively)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_commit.yaml b/docs/reference/docker_compose_commit.yaml
new file mode 100644
index 00000000000..95f4834a97b
--- /dev/null
+++ b/docs/reference/docker_compose_commit.yaml
@@ -0,0 +1,76 @@
+command: docker compose commit
+short: Create a new image from a service container's changes
+long: Create a new image from a service container's changes
+usage: docker compose commit [OPTIONS] SERVICE [REPOSITORY[:TAG]]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: author
+ shorthand: a
+ value_type: string
+ description: Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: change
+ shorthand: c
+ value_type: list
+ description: Apply Dockerfile instruction to the created image
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+ description: Index of the container if service has multiple replicas.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: message
+ shorthand: m
+ value_type: string
+ description: Commit message
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: pause
+ shorthand: p
+ value_type: bool
+ default_value: "true"
+ description: Pause container during commit
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_config.yaml b/docs/reference/docker_compose_config.yaml
new file mode 100644
index 00000000000..3efc922b219
--- /dev/null
+++ b/docs/reference/docker_compose_config.yaml
@@ -0,0 +1,218 @@
+command: docker compose config
+short: Parse, resolve and render compose file in canonical format
+long: |-
+ `docker compose config` renders the actual data model to be applied on the Docker Engine.
+ It merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into
+ the canonical format.
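+
+ For example, assuming a compose file in the current directory, you can validate it and inspect the resolved model, or only list its services:
+
+ ```console
+ $ docker compose config
+ $ docker compose config --services
+ ```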
+usage: docker compose config [OPTIONS] [SERVICE...]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: environment
+ value_type: bool
+ default_value: "false"
+ description: Print environment used for interpolation.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: format
+ value_type: string
+ description: 'Format the output. Values: [yaml | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: hash
+ value_type: string
+ description: Print the service config hash, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: images
+ value_type: bool
+ default_value: "false"
+ description: Print the image names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: lock-image-digests
+ value_type: bool
+ default_value: "false"
+ description: Produces an override file with image digests
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: models
+ value_type: bool
+ default_value: "false"
+ description: Print the model names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: networks
+ value_type: bool
+ default_value: "false"
+ description: Print the network names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-consistency
+ value_type: bool
+ default_value: "false"
+ description: |
+ Don't check model consistency - warning: may produce invalid Compose output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-env-resolution
+ value_type: bool
+ default_value: "false"
+ description: Don't resolve service env files
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-interpolate
+ value_type: bool
+ default_value: "false"
+ description: Don't interpolate environment variables
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-normalize
+ value_type: bool
+ default_value: "false"
+ description: Don't normalize compose model
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-path-resolution
+ value_type: bool
+ default_value: "false"
+ description: Don't resolve file paths
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: output
+ shorthand: o
+ value_type: string
+ description: Save to file (default to stdout)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: profiles
+ value_type: bool
+ default_value: "false"
+ description: Print the profile names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only validate the configuration, don't print anything
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: resolve-image-digests
+ value_type: bool
+ default_value: "false"
+ description: Pin image tags to digests
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: services
+ value_type: bool
+ default_value: "false"
+ description: Print the service names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: variables
+ value_type: bool
+ default_value: "false"
+ description: Print model variables and default values.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: volumes
+ value_type: bool
+ default_value: "false"
+ description: Print the volume names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_convert.yaml b/docs/reference/docker_compose_convert.yaml
index 2c23e2e0436..d1913221968 100644
--- a/docs/reference/docker_compose_convert.yaml
+++ b/docs/reference/docker_compose_convert.yaml
@@ -1,126 +1,137 @@
command: docker compose convert
-aliases: config
+aliases: docker compose convert, docker compose config
short: Converts the compose file to platform's canonical format
long: |-
- `docker compose convert` render the actual data model to be applied on target platform. When used with Docker engine,
- it merges the Compose files set by `-f` flags, resolves variables in Compose file, and expands short-notation into
- fully defined Compose model.
+ `docker compose convert` renders the actual data model to be applied on the target platform. When used with the Docker engine,
+ it merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into
+ the canonical format.
- To allow smooth migration from docker-compose, this subcommand declares alias `docker compose config`
-usage: docker compose convert SERVICES
+ To allow a smooth migration from docker-compose, this subcommand declares the alias `docker compose config`
+usage: docker compose convert [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: format
- value_type: string
- default_value: yaml
- description: 'Format the output. Values: [yaml | json]'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: hash
- value_type: string
- description: Print the service config hash, one per line.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: images
- value_type: bool
- default_value: "false"
- description: Print the image names, one per line.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-interpolate
- value_type: bool
- default_value: "false"
- description: Don't interpolate environment variables.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-normalize
- value_type: bool
- default_value: "false"
- description: Don't normalize compose model.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: output
- shorthand: o
- value_type: string
- description: Save to file (default to stdout)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: profiles
- value_type: bool
- default_value: "false"
- description: Print the profile names, one per line.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Only validate the configuration, don't print anything.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: resolve-image-digests
- value_type: bool
- default_value: "false"
- description: Pin image tags to digests.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: services
- value_type: bool
- default_value: "false"
- description: Print the service names, one per line.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: volumes
- value_type: bool
- default_value: "false"
- description: Print the volume names, one per line.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: format
+ value_type: string
+ default_value: yaml
+ description: 'Format the output. Values: [yaml | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: hash
+ value_type: string
+ description: Print the service config hash, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: images
+ value_type: bool
+ default_value: "false"
+ description: Print the image names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-consistency
+ value_type: bool
+ default_value: "false"
+ description: |
+ Don't check model consistency - warning: may produce invalid Compose output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-interpolate
+ value_type: bool
+ default_value: "false"
+ description: Don't interpolate environment variables.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-normalize
+ value_type: bool
+ default_value: "false"
+ description: Don't normalize compose model.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: output
+ shorthand: o
+ value_type: string
+ description: Save to file (default to stdout)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: profiles
+ value_type: bool
+ default_value: "false"
+ description: Print the profile names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only validate the configuration, don't print anything.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: resolve-image-digests
+ value_type: bool
+ default_value: "false"
+ description: Pin image tags to digests.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: services
+ value_type: bool
+ default_value: "false"
+ description: Print the service names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: volumes
+ value_type: bool
+ default_value: "false"
+ description: Print the volume names, one per line.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
experimental: false
experimentalcli: false
diff --git a/docs/reference/docker_compose_cp.yaml b/docs/reference/docker_compose_cp.yaml
index 461f9a5dc8c..24f6aec87f9 100644
--- a/docs/reference/docker_compose_cp.yaml
+++ b/docs/reference/docker_compose_cp.yaml
@@ -1,55 +1,67 @@
command: docker compose cp
short: Copy files/folders between a service container and the local filesystem
long: Copy files/folders between a service container and the local filesystem
-usage: "docker compose cp [OPTIONS] SERVICE:SRC_PATH DEST_PATH|-\n\tdocker compose
- cp [OPTIONS] SRC_PATH|- SERVICE:DEST_PATH"
+usage: |-
+ docker compose cp [OPTIONS] SERVICE:SRC_PATH DEST_PATH|-
+ docker compose cp [OPTIONS] SRC_PATH|- SERVICE:DEST_PATH
pname: docker compose
plink: docker_compose.yaml
options:
-- option: all
- value_type: bool
- default_value: "false"
- description: Copy to all the containers of the service.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: archive
- shorthand: a
- value_type: bool
- default_value: "false"
- description: Archive mode (copy all uid/gid information)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: follow-link
- shorthand: L
- value_type: bool
- default_value: "false"
- description: Always follow symbol link in SRC_PATH
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: index
- value_type: int
- default_value: "1"
- description: |
- Index of the container if there are multiple instances of a service [default: 1].
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: all
+ value_type: bool
+ default_value: "false"
+ description: Include containers created by the run command
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: archive
+ shorthand: a
+ value_type: bool
+ default_value: "false"
+ description: Archive mode (copy all uid/gid information)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: follow-link
+ shorthand: L
+ value_type: bool
+ default_value: "false"
+ description: Always follow symbol link in SRC_PATH
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+ description: Index of the container if service has multiple replicas
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_create.yaml b/docs/reference/docker_compose_create.yaml
index f71ea7b7a8c..f6ab1b86824 100644
--- a/docs/reference/docker_compose_create.yaml
+++ b/docs/reference/docker_compose_create.yaml
@@ -1,53 +1,117 @@
command: docker compose create
-short: Creates containers for a service.
-long: Creates containers for a service.
-usage: docker compose create [SERVICE...]
+short: Creates containers for a service
+long: Creates containers for a service
+usage: docker compose create [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: build
- value_type: bool
- default_value: "false"
- description: Build images before starting containers.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: force-recreate
- value_type: bool
- default_value: "false"
- description: |
- Recreate containers even if their configuration and image haven't changed.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-build
- value_type: bool
- default_value: "false"
- description: Don't build an image, even if it's missing.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-recreate
- value_type: bool
- default_value: "false"
- description: |
- If containers already exist, don't recreate them. Incompatible with --force-recreate.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: build
+ value_type: bool
+ default_value: "false"
+ description: Build images before starting containers
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: force-recreate
+ value_type: bool
+ default_value: "false"
+ description: |
+ Recreate containers even if their configuration and image haven't changed
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-build
+ value_type: bool
+ default_value: "false"
+ description: Don't build an image, even if it's missing
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-recreate
+ value_type: bool
+ default_value: "false"
+ description: |
+ If containers already exist, don't recreate them. Incompatible with --force-recreate.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: pull
+ value_type: string
+ default_value: policy
+ description: Pull image before running ("always"|"missing"|"never"|"build")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet-pull
+ value_type: bool
+ default_value: "false"
+ description: Pull without printing progress information
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: remove-orphans
+ value_type: bool
+ default_value: "false"
+ description: Remove containers for services not defined in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: scale
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: "yes"
+ shorthand: "y"
+ value_type: bool
+ default_value: "false"
+ description: Assume "yes" as answer to all prompts and run non-interactively
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_down.yaml b/docs/reference/docker_compose_down.yaml
index 95ed4c1f049..77bf526289b 100644
--- a/docs/reference/docker_compose_down.yaml
+++ b/docs/reference/docker_compose_down.yaml
@@ -1,67 +1,79 @@
command: docker compose down
short: Stop and remove containers, networks
long: |-
- Stops containers and removes containers, networks, volumes, and images created by `up`.
+ Stops containers and removes containers, networks, volumes, and images created by `up`.
- By default, the only things removed are:
+ By default, the only things removed are:
- - Containers for services defined in the Compose file
- - Networks defined in the networks section of the Compose file
- - The default network, if one is used
+ - Containers for services defined in the Compose file.
+ - Networks defined in the networks section of the Compose file.
+ - The default network, if one is used.
- Networks and volumes defined as external are never removed.
+ Networks and volumes defined as external are never removed.
- Anonymous volumes are not removed by default. However, as they don’t have a stable name, they will not be automatically
- mounted by a subsequent `up`. For data that needs to persist between updates, use explicit paths as bind mounts or
- named volumes.
-usage: docker compose down
+ Anonymous volumes are not removed by default. However, as they don’t have a stable name, they are not automatically
+ mounted by a subsequent `up`. For data that needs to persist between updates, use explicit paths as bind mounts or
+ named volumes.
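+
+ For example, a minimal sketch of tearing down the stack together with its named and anonymous volumes (use with care, as volume data is deleted):
+
+ ```console
+ $ docker compose down --volumes
+ ```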
+usage: docker compose down [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: remove-orphans
- value_type: bool
- default_value: "false"
- description: Remove containers for services not defined in the Compose file.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: rmi
- value_type: string
- description: |
- Remove images used by services. "local" remove only images that don't have a custom tag ("local"|"all")
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: timeout
- shorthand: t
- value_type: int
- default_value: "10"
- description: Specify a shutdown timeout in seconds
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: volumes
- shorthand: v
- value_type: bool
- default_value: "false"
- description: |
- Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: remove-orphans
+ value_type: bool
+ default_value: "false"
+ description: Remove containers for services not defined in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: rmi
+ value_type: string
+ description: |
+ Remove images used by services. "local" removes only images that don't have a custom tag ("local"|"all")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: timeout
+ shorthand: t
+ value_type: int
+ default_value: "0"
+ description: Specify a shutdown timeout in seconds
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: volumes
+ shorthand: v
+ value_type: bool
+ default_value: "false"
+ description: |
+ Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_events.yaml b/docs/reference/docker_compose_events.yaml
index 1f0f8250f22..7c4cb4297f9 100644
--- a/docs/reference/docker_compose_events.yaml
+++ b/docs/reference/docker_compose_events.yaml
@@ -1,40 +1,70 @@
command: docker compose events
-short: Receive real time events from containers.
+short: Receive real time events from containers
long: |-
- Stream container events for every container in the project.
+ Stream container events for every container in the project.
- With the `--json` flag, a json object is printed one per line with the format:
+ With the `--json` flag, a json object is printed one per line with the format:
- ```json
- {
- "time": "2015-11-20T18:01:03.615550",
- "type": "container",
- "action": "create",
- "id": "213cf7...5fc39a",
- "service": "web",
- "attributes": {
- "name": "application_web_1",
- "image": "alpine:edge"
- }
- }
- ```
+ ```json
+ {
+ "time": "2015-11-20T18:01:03.615550",
+ "type": "container",
+ "action": "create",
+ "id": "213cf7...5fc39a",
+ "service": "web",
+ "attributes": {
+ "name": "application_web_1",
+ "image": "alpine:edge"
+ }
+ }
+ ```
- The events that can be received using this can be seen [here](/engine/reference/commandline/events/#object-types).
-usage: docker compose events [options] [--] [SERVICE...]
+ The events that can be received using this command can be seen [here](/reference/cli/docker/system/events/#object-types).
+usage: docker compose events [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: json
- value_type: bool
- default_value: "false"
- description: Output events as a stream of json objects
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: json
+ value_type: bool
+ default_value: "false"
+ description: Output events as a stream of json objects
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: since
+ value_type: string
+ description: Show all events created since timestamp
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: until
+ value_type: string
+ description: Stream events until this timestamp
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_exec.yaml b/docs/reference/docker_compose_exec.yaml
index 5f70743f8ec..66ecfddab8d 100644
--- a/docs/reference/docker_compose_exec.yaml
+++ b/docs/reference/docker_compose_exec.yaml
@@ -1,112 +1,129 @@
command: docker compose exec
-short: Execute a command in a running container.
+short: Execute a command in a running container
long: |-
- This is the equivalent of `docker exec` targeting a Compose service.
+ This is the equivalent of `docker exec` targeting a Compose service.
- With this subcommand you can run arbitrary commands in your services. Commands are by default allocating a TTY, so
- you can use a command such as `docker compose exec web sh` to get an interactive prompt.
-usage: docker compose exec [options] [-e KEY=VAL...] [--] SERVICE COMMAND [ARGS...]
+ With this subcommand, you can run arbitrary commands in your services. Commands allocate a TTY by default, so
+ you can use a command such as `docker compose exec web sh` to get an interactive prompt.
+
+ By default, Compose enters the container in interactive mode and allocates a TTY, whereas the equivalent `docker exec`
+ command requires passing the `--interactive --tty` flags to get the same behavior. Compose also supports these two flags
+ to offer a smooth migration between the commands, even though they are no-ops by default. Still, `--interactive` can be
+ used to force-disable interactive mode (`--interactive=false`), typically when `docker compose exec` is used inside
+ a script.
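+
+ For instance, a minimal sketch of the script use case above, assuming a running service named `web`:
+
+ ```console
+ $ docker compose exec --interactive=false web cat /etc/hostname
+ ```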
+usage: docker compose exec [OPTIONS] SERVICE COMMAND [ARGS...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: detach
- shorthand: d
- value_type: bool
- default_value: "false"
- description: 'Detached mode: Run command in the background.'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: env
- shorthand: e
- value_type: stringArray
- default_value: '[]'
- description: Set environment variables
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: index
- value_type: int
- default_value: "1"
- description: |
- index of the container if there are multiple instances of a service [default: 1].
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: interactive
- shorthand: i
- value_type: bool
- default_value: "true"
- description: Keep STDIN open even if not attached.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-TTY
- shorthand: T
- value_type: bool
- default_value: "true"
- description: |
- Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: privileged
- value_type: bool
- default_value: "false"
- description: Give extended privileges to the process.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: tty
- shorthand: t
- value_type: bool
- default_value: "true"
- description: Allocate a pseudo-TTY.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: user
- shorthand: u
- value_type: string
- description: Run the command as this user.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: workdir
- shorthand: w
- value_type: string
- description: Path to workdir directory for this command.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: detach
+ shorthand: d
+ value_type: bool
+ default_value: "false"
+ description: 'Detached mode: Run command in the background'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: env
+ shorthand: e
+ value_type: stringArray
+ default_value: '[]'
+ description: Set environment variables
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+ description: Index of the container if service has multiple replicas
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: interactive
+ shorthand: i
+ value_type: bool
+ default_value: "true"
+ description: Keep STDIN open even if not attached
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-tty
+ shorthand: T
+ value_type: bool
+ default_value: "true"
+ description: |
+ Disable pseudo-TTY allocation. By default 'docker compose exec' allocates a TTY.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: privileged
+ value_type: bool
+ default_value: "false"
+ description: Give extended privileges to the process
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: tty
+ shorthand: t
+ value_type: bool
+ default_value: "true"
+ description: Allocate a pseudo-TTY
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: user
+ shorthand: u
+ value_type: string
+ description: Run the command as this user
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: workdir
+ shorthand: w
+ value_type: string
+ description: Path to workdir directory for this command
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_export.yaml b/docs/reference/docker_compose_export.yaml
new file mode 100644
index 00000000000..5dfb3be0a47
--- /dev/null
+++ b/docs/reference/docker_compose_export.yaml
@@ -0,0 +1,45 @@
+command: docker compose export
+short: Export a service container's filesystem as a tar archive
+long: Export a service container's filesystem as a tar archive
+usage: docker compose export [OPTIONS] SERVICE
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: index
+ value_type: int
+ default_value: "0"
+    description: Index of the container if service has multiple replicas
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: output
+ shorthand: o
+ value_type: string
+ description: Write to a file, instead of STDOUT
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
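+examples: |-
+    As a usage sketch (the `web` service name is a placeholder), the following writes the filesystem
+    of the `web` service container to a local tar archive:
+
+    ```console
+    $ docker compose export --output web.tar web
+    ```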
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_images.yaml b/docs/reference/docker_compose_images.yaml
index 8e263aef275..33187df42d6 100644
--- a/docs/reference/docker_compose_images.yaml
+++ b/docs/reference/docker_compose_images.yaml
@@ -1,22 +1,44 @@
command: docker compose images
short: List images used by the created containers
long: List images used by the created containers
-usage: docker compose images [SERVICE...]
+usage: docker compose images [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Only display IDs
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: format
+ value_type: string
+ default_value: table
+ description: 'Format the output. Values: [table | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only display IDs
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_kill.yaml b/docs/reference/docker_compose_kill.yaml
index f933478281d..fbffcc9cb89 100644
--- a/docs/reference/docker_compose_kill.yaml
+++ b/docs/reference/docker_compose_kill.yaml
@@ -1,27 +1,49 @@
command: docker compose kill
-short: Force stop service containers.
+short: Force stop service containers
long: |-
- Forces running containers to stop by sending a `SIGKILL` signal. Optionally the signal can be passed, for example:
+    Forces running containers to stop by sending a `SIGKILL` signal. Optionally, a different signal can be passed, for example:
- ```console
- $ docker-compose kill -s SIGINT
- ```
-usage: docker compose kill [options] [SERVICE...]
+ ```console
+ $ docker compose kill -s SIGINT
+ ```
+usage: docker compose kill [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: signal
- shorthand: s
- value_type: string
- default_value: SIGKILL
- description: SIGNAL to send to the container.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: remove-orphans
+ value_type: bool
+ default_value: "false"
+ description: Remove containers for services not defined in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: signal
+ shorthand: s
+ value_type: string
+ default_value: SIGKILL
+ description: SIGNAL to send to the container
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_logs.yaml b/docs/reference/docker_compose_logs.yaml
index 2846baccec6..92d94dd108c 100644
--- a/docs/reference/docker_compose_logs.yaml
+++ b/docs/reference/docker_compose_logs.yaml
@@ -1,84 +1,107 @@
command: docker compose logs
short: View output from containers
-long: Displays log output from services.
-usage: docker compose logs [SERVICE...]
+long: Displays log output from services
+usage: docker compose logs [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: follow
- shorthand: f
- value_type: bool
- default_value: "false"
- description: Follow log output.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-color
- value_type: bool
- default_value: "false"
- description: Produce monochrome output.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-log-prefix
- value_type: bool
- default_value: "false"
- description: Don't print prefix in logs.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: since
- value_type: string
- description: |
- Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: tail
- value_type: string
- default_value: all
- description: |
- Number of lines to show from the end of the logs for each container.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: timestamps
- shorthand: t
- value_type: bool
- default_value: "false"
- description: Show timestamps.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: until
- value_type: string
- description: |
- Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: follow
+ shorthand: f
+ value_type: bool
+ default_value: "false"
+ description: Follow log output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+    description: Index of the container if service has multiple replicas
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-color
+ value_type: bool
+ default_value: "false"
+ description: Produce monochrome output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-log-prefix
+ value_type: bool
+ default_value: "false"
+ description: Don't print prefix in logs
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: since
+ value_type: string
+ description: |
+ Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: tail
+ shorthand: "n"
+ value_type: string
+ default_value: all
+ description: |
+ Number of lines to show from the end of the logs for each container
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: timestamps
+ shorthand: t
+ value_type: bool
+ default_value: "false"
+ description: Show timestamps
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: until
+ value_type: string
+ description: |
+ Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
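+examples: |-
+    As a usage sketch (the `web` service name is a placeholder), the following follows the logs of a
+    single service, printing timestamps and only the last 10 lines per container:
+
+    ```console
+    $ docker compose logs --follow --timestamps --tail 10 web
+    ```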
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_ls.yaml b/docs/reference/docker_compose_ls.yaml
index 97ce1364ee1..dd6418c652f 100644
--- a/docs/reference/docker_compose_ls.yaml
+++ b/docs/reference/docker_compose_ls.yaml
@@ -1,52 +1,64 @@
command: docker compose ls
short: List running compose projects
-long: List Compose projects running on platform.
-usage: docker compose ls
+long: Lists running Compose projects
+usage: docker compose ls [OPTIONS]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: all
- shorthand: a
- value_type: bool
- default_value: "false"
- description: Show all stopped Compose projects
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: filter
- value_type: filter
- description: Filter output based on conditions provided.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: format
- value_type: string
- default_value: pretty
- description: 'Format the output. Values: [pretty | json].'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Only display IDs.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: all
+ shorthand: a
+ value_type: bool
+ default_value: "false"
+ description: Show all stopped Compose projects
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: filter
+ value_type: filter
+ description: Filter output based on conditions provided
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: format
+ value_type: string
+ default_value: table
+ description: 'Format the output. Values: [table | json]'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only display project names
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_pause.yaml b/docs/reference/docker_compose_pause.yaml
index 95342fda0c2..2ae1c402792 100644
--- a/docs/reference/docker_compose_pause.yaml
+++ b/docs/reference/docker_compose_pause.yaml
@@ -1,11 +1,23 @@
command: docker compose pause
short: Pause services
long: |
- Pauses running containers of a service. They can be unpaused with `docker compose unpause`.
+ Pauses running containers of a service. They can be unpaused with `docker compose unpause`.
usage: docker compose pause [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_port.yaml b/docs/reference/docker_compose_port.yaml
index 739d3d7ea5a..8a07f31ea50 100644
--- a/docs/reference/docker_compose_port.yaml
+++ b/docs/reference/docker_compose_port.yaml
@@ -1,31 +1,43 @@
command: docker compose port
-short: Print the public port for a port binding.
-long: Prints the public port for a port binding.
-usage: docker compose port [options] [--] SERVICE PRIVATE_PORT
+short: Print the public port for a port binding
+long: Prints the public port for a port binding
+usage: docker compose port [OPTIONS] SERVICE PRIVATE_PORT
pname: docker compose
plink: docker_compose.yaml
options:
-- option: index
- value_type: int
- default_value: "1"
- description: index of the container if service has multiple replicas
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: protocol
- value_type: string
- default_value: tcp
- description: tcp or udp
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: index
+ value_type: int
+ default_value: "0"
+ description: Index of the container if service has multiple replicas
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: protocol
+ value_type: string
+ default_value: tcp
+ description: tcp or udp
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_ps.yaml b/docs/reference/docker_compose_ps.yaml
index 896f306944b..d0370275695 100644
--- a/docs/reference/docker_compose_ps.yaml
+++ b/docs/reference/docker_compose_ps.yaml
@@ -1,171 +1,212 @@
command: docker compose ps
short: List containers
long: |-
- Lists containers for a Compose project, with current status and exposed ports.
- By default, both running and stopped containers are shown:
-
- ```console
- $ docker compose ps
- NAME COMMAND SERVICE STATUS PORTS
- example-bar-1 "/docker-entrypoint.…" bar exited (0)
- example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
- ```
-usage: docker compose ps [SERVICE...]
+ Lists containers for a Compose project, with current status and exposed ports.
+
+ ```console
+ $ docker compose ps
+ NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+ example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+ ```
+
+    By default, only running containers are shown. The `--all` flag can be used to include stopped containers.
+
+ ```console
+ $ docker compose ps --all
+ NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+ example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+ example-bar-1 alpine "/entrypoint.…" bar 4 seconds ago exited (0)
+ ```
+usage: docker compose ps [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: all
- shorthand: a
- value_type: bool
- default_value: "false"
- description: |
- Show all stopped containers (including those created by the run command)
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: filter
- value_type: string
- description: 'Filter services by a property (supported filters: status).'
- details_url: '#filter'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: format
- value_type: string
- default_value: pretty
- description: 'Format the output. Values: [pretty | json]'
- details_url: '#format'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Only display IDs
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: services
- value_type: bool
- default_value: "false"
- description: Display services
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: status
- value_type: stringArray
- default_value: '[]'
- description: |
- Filter services by status. Values: [paused | restarting | removing | running | dead | created | exited]
- details_url: '#status'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: all
+ shorthand: a
+ value_type: bool
+ default_value: "false"
+ description: |
+ Show all stopped containers (including those created by the run command)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: filter
+ value_type: string
+ description: 'Filter services by a property (supported filters: status)'
+ details_url: '#filter'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: format
+ value_type: string
+ default_value: table
+ description: |-
+ Format output using a custom template:
+ 'table': Print output in table format with column headers (default)
+ 'table TEMPLATE': Print output in table format using the given Go template
+ 'json': Print in JSON format
+ 'TEMPLATE': Print output using the given Go template.
+ Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates
+ details_url: '#format'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-trunc
+ value_type: bool
+ default_value: "false"
+ description: Don't truncate output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: orphans
+ value_type: bool
+ default_value: "true"
+ description: Include orphaned services (not declared by project)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only display IDs
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: services
+ value_type: bool
+ default_value: "false"
+ description: Display services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: status
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Filter services by status. Values: [paused | restarting | removing | running | dead | created | exited]
+ details_url: '#status'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
examples: |-
- ### Format the output (--format) {#format}
-
- By default, the `docker compose ps` command uses a table ("pretty") format to
- show the containers. The `--format` flag allows you to specify alternative
- presentations for the output. Currently supported options are `pretty` (default),
- and `json`, which outputs information about the containers as a JSON array:
-
- ```console
- $ docker compose ps --format json
- [{"ID":"1553b0236cf4d2715845f053a4ee97042c4f9a2ef655731ee34f1f7940eaa41a","Name":"example-bar-1","Command":"/docker-entrypoint.sh nginx -g 'daemon off;'","Project":"example","Service":"bar","State":"exited","Health":"","ExitCode":0,"Publishers":null},{"ID":"f02a4efaabb67416e1ff127d51c4b5578634a0ad5743bd65225ff7d1909a3fa0","Name":"example-foo-1","Command":"/docker-entrypoint.sh nginx -g 'daemon off;'","Project":"example","Service":"foo","State":"running","Health":"","ExitCode":0,"Publishers":[{"URL":"0.0.0.0","TargetPort":80,"PublishedPort":8080,"Protocol":"tcp"}]}]
- ```
-
- The JSON output allows you to use the information in other tools for further
- processing, for example, using the [`jq` utility](https://stedolan.github.io/jq/){:target="_blank" rel="noopener" class="_"}
- to pretty-print the JSON:
-
- ```console
- $ docker compose ps --format json | jq .
- [
- {
- "ID": "1553b0236cf4d2715845f053a4ee97042c4f9a2ef655731ee34f1f7940eaa41a",
- "Name": "example-bar-1",
- "Command": "/docker-entrypoint.sh nginx -g 'daemon off;'",
- "Project": "example",
- "Service": "bar",
- "State": "exited",
- "Health": "",
- "ExitCode": 0,
- "Publishers": null
- },
- {
- "ID": "f02a4efaabb67416e1ff127d51c4b5578634a0ad5743bd65225ff7d1909a3fa0",
- "Name": "example-foo-1",
- "Command": "/docker-entrypoint.sh nginx -g 'daemon off;'",
- "Project": "example",
- "Service": "foo",
- "State": "running",
- "Health": "",
- "ExitCode": 0,
- "Publishers": [
- {
- "URL": "0.0.0.0",
- "TargetPort": 80,
- "PublishedPort": 8080,
- "Protocol": "tcp"
- }
- ]
- }
- ]
- ```
-
- ### Filter containers by status (--status) {#status}
-
- Use the `--status` flag to filter the list of containers by status. For example,
- to show only containers that are running, or only containers that have exited:
-
- ```console
- $ docker compose ps --status=running
- NAME COMMAND SERVICE STATUS PORTS
- example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
-
- $ docker compose ps --status=exited
- NAME COMMAND SERVICE STATUS PORTS
- example-bar-1 "/docker-entrypoint.…" bar exited (0)
- ```
-
- ### Filter containers by status (--filter) {#filter}
-
- The [`--status` flag](#status) is a convenience shorthand for the `--filter status=`
- flag. The example below is the equivalent to the example from the previous section,
- this time using the `--filter` flag:
-
- ```console
- $ docker compose ps --filter status=running
- NAME COMMAND SERVICE STATUS PORTS
- example-foo-1 "/docker-entrypoint.…" foo running 0.0.0.0:8080->80/tcp
-
- $ docker compose ps --filter status=running
- NAME COMMAND SERVICE STATUS PORTS
- example-bar-1 "/docker-entrypoint.…" bar exited (0)
- ```
-
- The `docker compose ps` command currently only supports the `--filter status=`
- option, but additional filter options may be added in future.
+ ### Format the output (--format) {#format}
+
+    By default, the `docker compose ps` command uses a table format to
+    show the containers. The `--format` flag allows you to specify alternative
+    presentations for the output. Currently, supported options are `table` (default),
+    and `json`, which outputs information about the containers as a JSON array:
+
+ ```console
+ $ docker compose ps --format json
+ [{"ID":"1553b0236cf4d2715845f053a4ee97042c4f9a2ef655731ee34f1f7940eaa41a","Name":"example-bar-1","Command":"/docker-entrypoint.sh nginx -g 'daemon off;'","Project":"example","Service":"bar","State":"exited","Health":"","ExitCode":0,"Publishers":null},{"ID":"f02a4efaabb67416e1ff127d51c4b5578634a0ad5743bd65225ff7d1909a3fa0","Name":"example-foo-1","Command":"/docker-entrypoint.sh nginx -g 'daemon off;'","Project":"example","Service":"foo","State":"running","Health":"","ExitCode":0,"Publishers":[{"URL":"0.0.0.0","TargetPort":80,"PublishedPort":8080,"Protocol":"tcp"}]}]
+ ```
+
+ The JSON output allows you to use the information in other tools for further
+ processing, for example, using the [`jq` utility](https://stedolan.github.io/jq/)
+ to pretty-print the JSON:
+
+ ```console
+ $ docker compose ps --format json | jq .
+ [
+ {
+ "ID": "1553b0236cf4d2715845f053a4ee97042c4f9a2ef655731ee34f1f7940eaa41a",
+ "Name": "example-bar-1",
+ "Command": "/docker-entrypoint.sh nginx -g 'daemon off;'",
+ "Project": "example",
+ "Service": "bar",
+ "State": "exited",
+ "Health": "",
+ "ExitCode": 0,
+ "Publishers": null
+ },
+ {
+ "ID": "f02a4efaabb67416e1ff127d51c4b5578634a0ad5743bd65225ff7d1909a3fa0",
+ "Name": "example-foo-1",
+ "Command": "/docker-entrypoint.sh nginx -g 'daemon off;'",
+ "Project": "example",
+ "Service": "foo",
+ "State": "running",
+ "Health": "",
+ "ExitCode": 0,
+ "Publishers": [
+ {
+ "URL": "0.0.0.0",
+ "TargetPort": 80,
+ "PublishedPort": 8080,
+ "Protocol": "tcp"
+ }
+ ]
+ }
+ ]
+ ```
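+
+    The `--format` flag also accepts Go templates, as described in the option reference above. As a
+    sketch (assuming the `Service` and `State` fields shown in the JSON output), the following prints
+    a two-column table:
+
+    ```console
+    $ docker compose ps --format 'table {{.Service}}\t{{.State}}'
+    ```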
+
+ ### Filter containers by status (--status) {#status}
+
+ Use the `--status` flag to filter the list of containers by status. For example,
+ to show only containers that are running or only containers that have exited:
+
+ ```console
+ $ docker compose ps --status=running
+ NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+ example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+
+ $ docker compose ps --status=exited
+ NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+ example-bar-1 alpine "/entrypoint.…" bar 4 seconds ago exited (0)
+ ```
+
+ ### Filter containers by status (--filter) {#filter}
+
+ The [`--status` flag](#status) is a convenient shorthand for the `--filter status=`
+    flag. The example below is equivalent to the example from the previous section,
+ this time using the `--filter` flag:
+
+ ```console
+ $ docker compose ps --filter status=running
+ NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
+ example-foo-1 alpine "/entrypoint.…" foo 4 seconds ago Up 2 seconds 0.0.0.0:8080->80/tcp
+ ```
+
+ The `docker compose ps` command currently only supports the `--filter status=`
+ option, but additional filter options may be added in the future.
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_publish.yaml b/docs/reference/docker_compose_publish.yaml
new file mode 100644
index 00000000000..c3189d89c57
--- /dev/null
+++ b/docs/reference/docker_compose_publish.yaml
@@ -0,0 +1,86 @@
+command: docker compose publish
+short: Publish compose application
+long: Publish compose application
+usage: docker compose publish [OPTIONS] REPOSITORY[:TAG]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: app
+ value_type: bool
+ default_value: "false"
+ description: Published compose application (includes referenced images)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: insecure-registry
+ value_type: bool
+ default_value: "false"
+ description: Use insecure registry
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: oci-version
+ value_type: string
+ description: |
+ OCI image/artifact specification version (automatically determined by default)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: resolve-image-digests
+ value_type: bool
+ default_value: "false"
+ description: Pin image tags to digests
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: with-env
+ value_type: bool
+ default_value: "false"
+ description: Include environment variables in the published OCI artifact
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: "yes"
+ shorthand: "y"
+ value_type: bool
+ default_value: "false"
+ description: Assume "yes" as answer to all prompts
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
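+examples: |-
+    As a usage sketch (the repository and tag are placeholders), the following publishes the Compose
+    application to a registry, answering "yes" to all prompts:
+
+    ```console
+    $ docker compose publish --yes registry.example.com/acme/app:latest
+    ```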
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_pull.yaml b/docs/reference/docker_compose_pull.yaml
index c7da031ee6e..5b1316df132 100644
--- a/docs/reference/docker_compose_pull.yaml
+++ b/docs/reference/docker_compose_pull.yaml
@@ -1,105 +1,137 @@
command: docker compose pull
short: Pull service images
-long: |-
- Pulls an image associated with a service defined in a `compose.yaml` file, but does not start containers based on
- those images.
-usage: docker compose pull [SERVICE...]
+long: |
+    Pulls images associated with services defined in a `compose.yaml` file, but does not start containers based on those images
+usage: docker compose pull [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: ignore-pull-failures
- value_type: bool
- default_value: "false"
- description: Pull what it can and ignores images with pull failures
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: include-deps
- value_type: bool
- default_value: "false"
- description: Also pull services declared as dependencies
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-parallel
- value_type: bool
- default_value: "true"
- description: DEPRECATED disable parallel pulling.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: parallel
- value_type: bool
- default_value: "true"
- description: DEPRECATED pull multiple images in parallel.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet
- shorthand: q
- value_type: bool
- default_value: "false"
- description: Pull without printing progress information
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: ignore-buildable
+ value_type: bool
+ default_value: "false"
+ description: Ignore images that can be built
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: ignore-pull-failures
+ value_type: bool
+ default_value: "false"
+ description: Pull what it can and ignores images with pull failures
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: include-deps
+ value_type: bool
+ default_value: "false"
+ description: Also pull services declared as dependencies
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-parallel
+ value_type: bool
+ default_value: "true"
+ description: DEPRECATED disable parallel pulling
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: parallel
+ value_type: bool
+ default_value: "true"
+ description: DEPRECATED pull multiple images in parallel
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: policy
+ value_type: string
+ description: Apply pull policy ("missing"|"always")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Pull without printing progress information
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
examples: |-
- suppose you have this `compose.yaml`:
+ Consider the following `compose.yaml`:
- ```yaml
- services:
- db:
- image: postgres
- web:
- build: .
- command: bundle exec rails s -p 3000 -b '0.0.0.0'
- volumes:
- - .:/myapp
- ports:
- - "3000:3000"
- depends_on:
- - db
- ```
+ ```yaml
+ services:
+ db:
+ image: postgres
+ web:
+ build: .
+ command: bundle exec rails s -p 3000 -b '0.0.0.0'
+ volumes:
+ - .:/myapp
+ ports:
+ - "3000:3000"
+ depends_on:
+ - db
+ ```
- If you run `docker compose pull ServiceName` in the same directory as the `compose.yaml` file that defines the service,
- Docker pulls the associated image. For example, to call the postgres image configured as the db service in our example,
- you would run `docker compose pull db`.
+ If you run `docker compose pull ServiceName` in the same directory as the `compose.yaml` file that defines the service,
+    Docker pulls the associated image. For example, to pull the postgres image configured as the db service in our example,
+ you would run `docker compose pull db`.
- ```console
- $ docker compose pull db
- [+] Running 1/15
- ⠸ db Pulling 12.4s
- ⠿ 45b42c59be33 Already exists 0.0s
- ⠹ 40adec129f1a Downloading 3.374MB/4.178MB 9.3s
- ⠹ b4c431d00c78 Download complete 9.3s
- ⠹ 2696974e2815 Download complete 9.3s
- ⠹ 564b77596399 Downloading 5.622MB/7.965MB 9.3s
- ⠹ 5044045cf6f2 Downloading 216.7kB/391.1kB 9.3s
- ⠹ d736e67e6ac3 Waiting 9.3s
- ⠹ 390c1c9a5ae4 Waiting 9.3s
- ⠹ c0e62f172284 Waiting 9.3s
- ⠹ ebcdc659c5bf Waiting 9.3s
- ⠹ 29be22cb3acc Waiting 9.3s
- ⠹ f63c47038e66 Waiting 9.3s
- ⠹ 77a0c198cde5 Waiting 9.3s
- ⠹ c8752d5b785c Waiting 9.3s
- ``̀`
+ ```console
+ $ docker compose pull db
+ [+] Running 1/15
+ ⠸ db Pulling 12.4s
+ ⠿ 45b42c59be33 Already exists 0.0s
+ ⠹ 40adec129f1a Downloading 3.374MB/4.178MB 9.3s
+ ⠹ b4c431d00c78 Download complete 9.3s
+ ⠹ 2696974e2815 Download complete 9.3s
+ ⠹ 564b77596399 Downloading 5.622MB/7.965MB 9.3s
+ ⠹ 5044045cf6f2 Downloading 216.7kB/391.1kB 9.3s
+ ⠹ d736e67e6ac3 Waiting 9.3s
+ ⠹ 390c1c9a5ae4 Waiting 9.3s
+ ⠹ c0e62f172284 Waiting 9.3s
+ ⠹ ebcdc659c5bf Waiting 9.3s
+ ⠹ 29be22cb3acc Waiting 9.3s
+ ⠹ f63c47038e66 Waiting 9.3s
+ ⠹ 77a0c198cde5 Waiting 9.3s
+ ⠹ c8752d5b785c Waiting 9.3s
+ ```
+
+    `docker compose pull` tries to pull images for services with a `build` section. If the pull fails, it lets you know that the service image must be built. You can skip this by setting the `--ignore-buildable` flag.
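+
+    For example, to skip services that have a `build` section and pull only the images that cannot be
+    built locally:
+
+    ```console
+    $ docker compose pull --ignore-buildable
+    ```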
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_push.yaml b/docs/reference/docker_compose_push.yaml
index f006ad615db..be7c116065f 100644
--- a/docs/reference/docker_compose_push.yaml
+++ b/docs/reference/docker_compose_push.yaml
@@ -1,39 +1,72 @@
command: docker compose push
short: Push service images
long: |-
- Pushes images for services to their respective registry/repository.
+ Pushes images for services to their respective registry/repository.
- The following assumptions are made:
- - You are pushing an image you have built locally
- - You have access to the build key
+ The following assumptions are made:
+ - You are pushing an image you have built locally
+ - You have access to the build key
- Examples
+ Examples
- ```yaml
- services:
- service1:
- build: .
- image: localhost:5000/yourimage ## goes to local registry
+ ```yaml
+ services:
+ service1:
+ build: .
+ image: localhost:5000/yourimage ## goes to local registry
- service2:
- build: .
- image: your-dockerid/yourimage ## goes to your repository on Docker Hub
- ```
-usage: docker compose push [SERVICE...]
+ service2:
+ build: .
+ image: your-dockerid/yourimage ## goes to your repository on Docker Hub
+ ```
+usage: docker compose push [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: ignore-push-failures
- value_type: bool
- default_value: "false"
- description: Push what it can and ignores images with push failures
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: ignore-push-failures
+ value_type: bool
+ default_value: "false"
+ description: Push what it can and ignores images with push failures
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: include-deps
+ value_type: bool
+ default_value: "false"
+ description: Also push images of services declared as dependencies
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Push without printing progress information
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_restart.yaml b/docs/reference/docker_compose_restart.yaml
index 9f646cc07ff..3bc0a3ad83a 100644
--- a/docs/reference/docker_compose_restart.yaml
+++ b/docs/reference/docker_compose_restart.yaml
@@ -1,32 +1,54 @@
command: docker compose restart
-short: Restart containers
+short: Restart service containers
long: |-
- Restarts all stopped and running services.
+ Restarts all stopped and running services, or the specified services only.
- If you make changes to your `compose.yml` configuration, these changes are not reflected
- after running this command. For example, changes to environment variables (which are added
- after a container is built, but before the container's command is executed) are not updated
- after restarting.
+ If you make changes to your `compose.yml` configuration, these changes are not reflected
+ after running this command. For example, changes to environment variables (which are added
+ after a container is built, but before the container's command is executed) are not updated
+ after restarting.
- If you are looking to configure a service's restart policy, please refer to
- [restart](https://github.com/compose-spec/compose-spec/blob/master/spec.md#restart)
- or [restart_policy](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#restart_policy).
-usage: docker compose restart
+ If you are looking to configure a service's restart policy, refer to
+ [restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart)
+ or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy).
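+
+    As a usage sketch (`web` is a placeholder service name), the following restarts only the `web`
+    service and gives its containers 30 seconds to stop before restarting:
+
+    ```console
+    $ docker compose restart --timeout 30 web
+    ```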
+usage: docker compose restart [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: timeout
- shorthand: t
- value_type: int
- default_value: "10"
- description: Specify a shutdown timeout in seconds
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: no-deps
+ value_type: bool
+ default_value: "false"
+ description: Don't restart dependent services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: timeout
+ shorthand: t
+ value_type: int
+ default_value: "0"
+ description: Specify a shutdown timeout in seconds
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_rm.yaml b/docs/reference/docker_compose_rm.yaml
index 48108dcdcaf..7ddafae4809 100644
--- a/docs/reference/docker_compose_rm.yaml
+++ b/docs/reference/docker_compose_rm.yaml
@@ -1,70 +1,82 @@
command: docker compose rm
short: Removes stopped service containers
long: |-
- Removes stopped service containers.
+ Removes stopped service containers.
- By default, anonymous volumes attached to containers are not removed. You can override this with `-v`. To list all
- volumes, use `docker volume ls`.
+ By default, anonymous volumes attached to containers are not removed. You can override this with `-v`. To list all
+ volumes, use `docker volume ls`.
- Any data which is not in a volume is lost.
+ Any data which is not in a volume is lost.
- Running the command with no options also removes one-off containers created by `docker compose run`:
+ Running the command with no options also removes one-off containers created by `docker compose run`:
- ```console
- $ docker compose rm
- Going to remove djangoquickstart_web_run_1
- Are you sure? [yN] y
- Removing djangoquickstart_web_run_1 ... done
- ```
-usage: docker compose rm [SERVICE...]
+ ```console
+ $ docker compose rm
+ Going to remove djangoquickstart_web_run_1
+ Are you sure? [yN] y
+ Removing djangoquickstart_web_run_1 ... done
+ ```
+usage: docker compose rm [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: all
- shorthand: a
- value_type: bool
- default_value: "false"
- description: Deprecated - no effect
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: force
- shorthand: f
- value_type: bool
- default_value: "false"
- description: Don't ask to confirm removal
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: stop
- shorthand: s
- value_type: bool
- default_value: "false"
- description: Stop the containers, if required, before removing
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: volumes
- shorthand: v
- value_type: bool
- default_value: "false"
- description: Remove any anonymous volumes attached to containers
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: all
+ shorthand: a
+ value_type: bool
+ default_value: "false"
+ description: Deprecated - no effect
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: force
+ shorthand: f
+ value_type: bool
+ default_value: "false"
+ description: Don't ask to confirm removal
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: stop
+ shorthand: s
+ value_type: bool
+ default_value: "false"
+ description: Stop the containers, if required, before removing
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: volumes
+ shorthand: v
+ value_type: bool
+ default_value: "false"
+ description: Remove any anonymous volumes attached to containers
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_run.yaml b/docs/reference/docker_compose_run.yaml
index 1862a70a299..61c7ca0e8cb 100644
--- a/docs/reference/docker_compose_run.yaml
+++ b/docs/reference/docker_compose_run.yaml
@@ -1,243 +1,334 @@
command: docker compose run
-short: Run a one-off command on a service.
+short: Run a one-off command on a service
long: |-
- Runs a one-time command against a service.
+ Runs a one-time command against a service.
- the following command starts the `web` service and runs `bash` as its command:
+ The following command starts the `web` service and runs `bash` as its command:
- ```console
- $ docker compose run web bash
- ```
+ ```console
+ $ docker compose run web bash
+ ```
- Commands you use with run start in new containers with configuration defined by that of the service,
- including volumes, links, and other details. However, there are two important differences:
+ Commands you use with run start in new containers with configuration defined by that of the service,
+ including volumes, links, and other details. However, there are two important differences:
- First, the command passed by `run` overrides the command defined in the service configuration. For example, if the
- `web` service configuration is started with `bash`, then `docker compose run web python app.py` overrides it with
- `python app.py`.
+ First, the command passed by `run` overrides the command defined in the service configuration. For example, if the
+ `web` service configuration is started with `bash`, then `docker compose run web python app.py` overrides it with
+ `python app.py`.
- The second difference is that the `docker compose run` command does not create any of the ports specified in the
- service configuration. This prevents port collisions with already-open ports. If you do want the service’s ports
- to be created and mapped to the host, specify the `--service-ports`
+ The second difference is that the `docker compose run` command does not create any of the ports specified in the
+ service configuration. This prevents port collisions with already-open ports. If you do want the service’s ports
+ to be created and mapped to the host, specify the `--service-ports`
- ```console
- $ docker compose run --service-ports web python manage.py shell
- ```
+ ```console
+ $ docker compose run --service-ports web python manage.py shell
+ ```
- Alternatively, manual port mapping can be specified with the `--publish` or `-p` options, just as when using docker run:
+ Alternatively, manual port mapping can be specified with the `--publish` or `-p` options, just as when using docker run:
- ```console
- $ docker compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
- ```
+ ```console
+ $ docker compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
+ ```
- If you start a service configured with links, the run command first checks to see if the linked service is running
- and starts the service if it is stopped. Once all the linked services are running, the run executes the command you
- passed it. For example, you could run:
+ If you start a service configured with links, the run command first checks to see if the linked service is running
+ and starts the service if it is stopped. Once all the linked services are running, the run executes the command you
+ passed it. For example, you could run:
- ```console
- $ docker compose run db psql -h db -U docker
- ```
+ ```console
+ $ docker compose run db psql -h db -U docker
+ ```
- This opens an interactive PostgreSQL shell for the linked `db` container.
+ This opens an interactive PostgreSQL shell for the linked `db` container.
- If you do not want the run command to start linked containers, use the `--no-deps` flag:
+ If you do not want the run command to start linked containers, use the `--no-deps` flag:
- ```console
- $ docker compose run --no-deps web python manage.py shell
- ```
+ ```console
+ $ docker compose run --no-deps web python manage.py shell
+ ```
- If you want to remove the container after running while overriding the container’s restart policy, use the `--rm` flag:
+ If you want to remove the container after running while overriding the container’s restart policy, use the `--rm` flag:
- ```console
- $ docker compose run --rm web python manage.py db upgrade
- ```
+ ```console
+ $ docker compose run --rm web python manage.py db upgrade
+ ```
- This runs a database upgrade script, and removes the container when finished running, even if a restart policy is
- specified in the service configuration.
-usage: docker compose run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l
- KEY=VALUE...] SERVICE [COMMAND] [ARGS...]
+ This runs a database upgrade script, and removes the container when finished running, even if a restart policy is
+ specified in the service configuration.
+usage: docker compose run [OPTIONS] SERVICE [COMMAND] [ARGS...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: detach
- shorthand: d
- value_type: bool
- default_value: "false"
- description: Run container in background and print container ID
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: entrypoint
- value_type: string
- description: Override the entrypoint of the image
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: env
- shorthand: e
- value_type: stringArray
- default_value: '[]'
- description: Set environment variables
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: interactive
- shorthand: i
- value_type: bool
- default_value: "true"
- description: Keep STDIN open even if not attached.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: label
- shorthand: l
- value_type: stringArray
- default_value: '[]'
- description: Add or override a label
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: name
- value_type: string
- description: Assign a name to the container
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-TTY
- shorthand: T
- value_type: bool
- default_value: "true"
- description: 'Disable pseudo-TTY allocation (default: auto-detected).'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-deps
- value_type: bool
- default_value: "false"
- description: Don't start linked services.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: publish
- shorthand: p
- value_type: stringArray
- default_value: '[]'
- description: Publish a container's port(s) to the host.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet-pull
- value_type: bool
- default_value: "false"
- description: Pull without printing progress information.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: rm
- value_type: bool
- default_value: "false"
- description: Automatically remove the container when it exits
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: service-ports
- value_type: bool
- default_value: "false"
- description: |
- Run command with the service's ports enabled and mapped to the host.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: tty
- shorthand: t
- value_type: bool
- default_value: "true"
- description: Allocate a pseudo-TTY.
- deprecated: false
- hidden: true
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: use-aliases
- value_type: bool
- default_value: "false"
- description: |
- Use the service's network useAliases in the network(s) the container connects to.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: user
- shorthand: u
- value_type: string
- description: Run as specified username or uid
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: volume
- shorthand: v
- value_type: stringArray
- default_value: '[]'
- description: Bind mount a volume.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: workdir
- shorthand: w
- value_type: string
- description: Working directory inside the container
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: build
+ value_type: bool
+ default_value: "false"
+ description: Build image before starting container
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: cap-add
+ value_type: list
+ description: Add Linux capabilities
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: cap-drop
+ value_type: list
+ description: Drop Linux capabilities
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: detach
+ shorthand: d
+ value_type: bool
+ default_value: "false"
+ description: Run container in background and print container ID
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: entrypoint
+ value_type: string
+ description: Override the entrypoint of the image
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: env
+ shorthand: e
+ value_type: stringArray
+ default_value: '[]'
+ description: Set environment variables
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: env-from-file
+ value_type: stringArray
+ default_value: '[]'
+ description: Set environment variables from file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: interactive
+ shorthand: i
+ value_type: bool
+ default_value: "true"
+ description: Keep STDIN open even if not attached
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: label
+ shorthand: l
+ value_type: stringArray
+ default_value: '[]'
+ description: Add or override a label
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: name
+ value_type: string
+ description: Assign a name to the container
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-TTY
+ shorthand: T
+ value_type: bool
+ default_value: "true"
+ description: 'Disable pseudo-TTY allocation (default: auto-detected)'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-deps
+ value_type: bool
+ default_value: "false"
+ description: Don't start linked services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: publish
+ shorthand: p
+ value_type: stringArray
+ default_value: '[]'
+ description: Publish a container's port(s) to the host
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: pull
+ value_type: string
+ default_value: policy
+ description: Pull image before running ("always"|"missing"|"never")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Don't print anything to STDOUT
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet-build
+ value_type: bool
+ default_value: "false"
+ description: Suppress progress output from the build process
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet-pull
+ value_type: bool
+ default_value: "false"
+ description: Pull without printing progress information
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: remove-orphans
+ value_type: bool
+ default_value: "false"
+ description: Remove containers for services not defined in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: rm
+ value_type: bool
+ default_value: "false"
+ description: Automatically remove the container when it exits
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: service-ports
+ shorthand: P
+ value_type: bool
+ default_value: "false"
+ description: |
+ Run command with all service's ports enabled and mapped to the host
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: tty
+ shorthand: t
+ value_type: bool
+ default_value: "true"
+ description: Allocate a pseudo-TTY
+ deprecated: false
+ hidden: true
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: use-aliases
+ value_type: bool
+ default_value: "false"
+ description: |
+ Use the service's network useAliases in the network(s) the container connects to
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: user
+ shorthand: u
+ value_type: string
+ description: Run as specified username or uid
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: volume
+ shorthand: v
+ value_type: stringArray
+ default_value: '[]'
+ description: Bind mount a volume
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: workdir
+ shorthand: w
+ value_type: string
+ description: Working directory inside the container
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_scale.yaml b/docs/reference/docker_compose_scale.yaml
new file mode 100644
index 00000000000..f840a51b4e1
--- /dev/null
+++ b/docs/reference/docker_compose_scale.yaml
@@ -0,0 +1,35 @@
+command: docker compose scale
+short: Scale services
+long: Scale services
+usage: docker compose scale [SERVICE=REPLICAS...]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: no-deps
+ value_type: bool
+ default_value: "false"
+ description: Don't start linked services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_start.yaml b/docs/reference/docker_compose_start.yaml
index 165f7a32e29..902b688d3e7 100644
--- a/docs/reference/docker_compose_start.yaml
+++ b/docs/reference/docker_compose_start.yaml
@@ -1,10 +1,22 @@
command: docker compose start
short: Start services
-long: Starts existing containers for a service.
+long: Starts existing containers for a service
usage: docker compose start [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_stats.yaml b/docs/reference/docker_compose_stats.yaml
new file mode 100644
index 00000000000..e6854b05a25
--- /dev/null
+++ b/docs/reference/docker_compose_stats.yaml
@@ -0,0 +1,71 @@
+command: docker compose stats
+short: Display a live stream of container(s) resource usage statistics
+long: Display a live stream of container(s) resource usage statistics
+usage: docker compose stats [OPTIONS] [SERVICE]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: all
+ shorthand: a
+ value_type: bool
+ default_value: "false"
+ description: Show all containers (default shows just running)
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: format
+ value_type: string
+ description: |-
+ Format output using a custom template:
+ 'table': Print output in table format with column headers (default)
+ 'table TEMPLATE': Print output in table format using the given Go template
+ 'json': Print in JSON format
+ 'TEMPLATE': Print output using the given Go template.
+ Refer to https://docs.docker.com/engine/cli/formatting/ for more information about formatting output with templates
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-stream
+ value_type: bool
+ default_value: "false"
+ description: Disable streaming stats and only pull the first result
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-trunc
+ value_type: bool
+ default_value: "false"
+ description: Do not truncate output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_stop.yaml b/docs/reference/docker_compose_stop.yaml
index e0c74f598f5..f2ec34ccb3d 100644
--- a/docs/reference/docker_compose_stop.yaml
+++ b/docs/reference/docker_compose_stop.yaml
@@ -1,23 +1,35 @@
command: docker compose stop
short: Stop services
long: |
- Stops running containers without removing them. They can be started again with `docker compose start`.
-usage: docker compose stop [SERVICE...]
+ Stops running containers without removing them. They can be started again with `docker compose start`.
+usage: docker compose stop [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: timeout
- shorthand: t
- value_type: int
- default_value: "10"
- description: Specify a shutdown timeout in seconds
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: timeout
+ shorthand: t
+ value_type: int
+ default_value: "0"
+ description: Specify a shutdown timeout in seconds
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_top.yaml b/docs/reference/docker_compose_top.yaml
index 73987efc330..17cdf7e3818 100644
--- a/docs/reference/docker_compose_top.yaml
+++ b/docs/reference/docker_compose_top.yaml
@@ -1,17 +1,29 @@
command: docker compose top
short: Display the running processes
-long: Displays the running processes.
+long: Displays the running processes
usage: docker compose top [SERVICES...]
pname: docker compose
plink: docker_compose.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
examples: |-
- ```console
- $ docker compose top
- example_foo_1
- UID PID PPID C STIME TTY TIME CMD
- root 142353 142331 2 15:33 ? 00:00:00 ping localhost -c 5
- ```
+ ```console
+ $ docker compose top
+ example_foo_1
+ UID PID PPID C STIME TTY TIME CMD
+ root 142353 142331 2 15:33 ? 00:00:00 ping localhost -c 5
+ ```
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_unpause.yaml b/docs/reference/docker_compose_unpause.yaml
index 0a2961ba211..e2047720b8f 100644
--- a/docs/reference/docker_compose_unpause.yaml
+++ b/docs/reference/docker_compose_unpause.yaml
@@ -1,10 +1,22 @@
command: docker compose unpause
short: Unpause services
-long: Unpauses paused containers of a service.
+long: Unpauses paused containers of a service
usage: docker compose unpause [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_up.yaml b/docs/reference/docker_compose_up.yaml
index e76c20e8324..8c78a8fa683 100644
--- a/docs/reference/docker_compose_up.yaml
+++ b/docs/reference/docker_compose_up.yaml
@@ -1,236 +1,348 @@
command: docker compose up
short: Create and start containers
long: |-
- Builds, (re)creates, starts, and attaches to containers for a service.
+ Builds, (re)creates, starts, and attaches to containers for a service.
- Unless they are already running, this command also starts any linked services.
+ Unless they are already running, this command also starts any linked services.
- The `docker compose up` command aggregates the output of each container (like `docker compose logs --follow` does).
- When the command exits, all containers are stopped. Running `docker compose up --detach` starts the containers in the
- background and leaves them running.
+ The `docker compose up` command aggregates the output of each container (like `docker compose logs --follow` does).
+ One can optionally select a subset of services to attach to using `--attach` flag, or exclude some services using
+ `--no-attach` to prevent output to be flooded by some verbose services.
- If there are existing containers for a service, and the service’s configuration or image was changed after the
- container’s creation, `docker compose up` picks up the changes by stopping and recreating the containers
- (preserving mounted volumes). To prevent Compose from picking up changes, use the `--no-recreate` flag.
+ When the command exits, all containers are stopped. Running `docker compose up --detach` starts the containers in the
+ background and leaves them running.
- If you want to force Compose to stop and recreate all containers, use the `--force-recreate` flag.
+ If there are existing containers for a service, and the service’s configuration or image was changed after the
+ container’s creation, `docker compose up` picks up the changes by stopping and recreating the containers
+ (preserving mounted volumes). To prevent Compose from picking up changes, use the `--no-recreate` flag.
- If the process encounters an error, the exit code for this command is `1`.
- If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the containers are stopped, and the exit code is `0`.
-usage: docker compose up [SERVICE...]
+ If you want to force Compose to stop and recreate all containers, use the `--force-recreate` flag.
+
+ If the process encounters an error, the exit code for this command is `1`.
+ If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the containers are stopped, and the exit code is `0`.
+usage: docker compose up [OPTIONS] [SERVICE...]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: abort-on-container-exit
- value_type: bool
- default_value: "false"
- description: |
- Stops all containers if any container was stopped. Incompatible with -d
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: always-recreate-deps
- value_type: bool
- default_value: "false"
- description: Recreate dependent containers. Incompatible with --no-recreate.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: attach
- value_type: stringArray
- default_value: '[]'
- description: Attach to service output.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: attach-dependencies
- value_type: bool
- default_value: "false"
- description: Attach to dependent containers.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: build
- value_type: bool
- default_value: "false"
- description: Build images before starting containers.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: detach
- shorthand: d
- value_type: bool
- default_value: "false"
- description: 'Detached mode: Run containers in the background'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: exit-code-from
- value_type: string
- description: |
- Return the exit code of the selected service container. Implies --abort-on-container-exit
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: force-recreate
- value_type: bool
- default_value: "false"
- description: |
- Recreate containers even if their configuration and image haven't changed.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-build
- value_type: bool
- default_value: "false"
- description: Don't build an image, even if it's missing.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-color
- value_type: bool
- default_value: "false"
- description: Produce monochrome output.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-deps
- value_type: bool
- default_value: "false"
- description: Don't start linked services.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-log-prefix
- value_type: bool
- default_value: "false"
- description: Don't print prefix in logs.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-recreate
- value_type: bool
- default_value: "false"
- description: |
- If containers already exist, don't recreate them. Incompatible with --force-recreate.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: no-start
- value_type: bool
- default_value: "false"
- description: Don't start the services after creating them.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: quiet-pull
- value_type: bool
- default_value: "false"
- description: Pull without printing progress information.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: remove-orphans
- value_type: bool
- default_value: "false"
- description: Remove containers for services not defined in the Compose file.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: renew-anon-volumes
- shorthand: V
- value_type: bool
- default_value: "false"
- description: |
- Recreate anonymous volumes instead of retrieving data from the previous containers.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: scale
- value_type: stringArray
- default_value: '[]'
- description: |
- Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: timeout
- shorthand: t
- value_type: int
- default_value: "10"
- description: |
- Use this timeout in seconds for container shutdown when attached or when containers are already running.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: wait
- value_type: bool
- default_value: "false"
- description: Wait for services to be running|healthy. Implies detached mode.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: abort-on-container-exit
+ value_type: bool
+ default_value: "false"
+ description: |
+ Stops all containers if any container was stopped. Incompatible with -d
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: abort-on-container-failure
+ value_type: bool
+ default_value: "false"
+ description: |
+ Stops all containers if any container exited with failure. Incompatible with -d
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: always-recreate-deps
+ value_type: bool
+ default_value: "false"
+ description: Recreate dependent containers. Incompatible with --no-recreate.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: attach
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Restrict attaching to the specified services. Incompatible with --attach-dependencies.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: attach-dependencies
+ value_type: bool
+ default_value: "false"
+ description: Automatically attach to log output of dependent services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: build
+ value_type: bool
+ default_value: "false"
+ description: Build images before starting containers
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: detach
+ shorthand: d
+ value_type: bool
+ default_value: "false"
+ description: 'Detached mode: Run containers in the background'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: exit-code-from
+ value_type: string
+ description: |
+ Return the exit code of the selected service container. Implies --abort-on-container-exit
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: force-recreate
+ value_type: bool
+ default_value: "false"
+ description: |
+ Recreate containers even if their configuration and image haven't changed
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: menu
+ value_type: bool
+ default_value: "false"
+ description: |
+ Enable interactive shortcuts when running attached. Incompatible with --detach. Can also be enable/disable by setting COMPOSE_MENU environment var.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-attach
+ value_type: stringArray
+ default_value: '[]'
+ description: Do not attach (stream logs) to the specified services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-build
+ value_type: bool
+ default_value: "false"
+ description: Don't build an image, even if it's policy
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-color
+ value_type: bool
+ default_value: "false"
+ description: Produce monochrome output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-deps
+ value_type: bool
+ default_value: "false"
+ description: Don't start linked services
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-log-prefix
+ value_type: bool
+ default_value: "false"
+ description: Don't print prefix in logs
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-recreate
+ value_type: bool
+ default_value: "false"
+ description: |
+ If containers already exist, don't recreate them. Incompatible with --force-recreate.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: no-start
+ value_type: bool
+ default_value: "false"
+ description: Don't start the services after creating them
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: pull
+ value_type: string
+ default_value: policy
+ description: Pull image before running ("always"|"missing"|"never")
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet-build
+ value_type: bool
+ default_value: "false"
+ description: Suppress the build output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet-pull
+ value_type: bool
+ default_value: "false"
+ description: Pull without printing progress information
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: remove-orphans
+ value_type: bool
+ default_value: "false"
+ description: Remove containers for services not defined in the Compose file
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: renew-anon-volumes
+ shorthand: V
+ value_type: bool
+ default_value: "false"
+ description: |
+ Recreate anonymous volumes instead of retrieving data from the previous containers
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: scale
+ value_type: stringArray
+ default_value: '[]'
+ description: |
+ Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: timeout
+ shorthand: t
+ value_type: int
+ default_value: "0"
+ description: |
+ Use this timeout in seconds for container shutdown when attached or when containers are already running
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: timestamps
+ value_type: bool
+ default_value: "false"
+ description: Show timestamps
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: wait
+ value_type: bool
+ default_value: "false"
+ description: Wait for services to be running|healthy. Implies detached mode.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: wait-timeout
+ value_type: int
+ default_value: "0"
+ description: |
+ Maximum duration in seconds to wait for the project to be running|healthy
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: watch
+ shorthand: w
+ value_type: bool
+ default_value: "false"
+ description: |
+ Watch source code and rebuild/refresh containers when files are updated.
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: "yes"
+ shorthand: "y"
+ value_type: bool
+ default_value: "false"
+ description: Assume "yes" as answer to all prompts and run non-interactively
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_version.yaml b/docs/reference/docker_compose_version.yaml
index cc7c5ca3480..789e94818ea 100644
--- a/docs/reference/docker_compose_version.yaml
+++ b/docs/reference/docker_compose_version.yaml
@@ -1,31 +1,43 @@
command: docker compose version
short: Show the Docker Compose version information
long: Show the Docker Compose version information
-usage: docker compose version
+usage: docker compose version [OPTIONS]
pname: docker compose
plink: docker_compose.yaml
options:
-- option: format
- shorthand: f
- value_type: string
- description: 'Format the output. Values: [pretty | json]. (Default: pretty)'
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
-- option: short
- value_type: bool
- default_value: "false"
- description: Shows only Compose's version number.
- deprecated: false
- hidden: false
- experimental: false
- experimentalcli: false
- kubernetes: false
- swarm: false
+ - option: format
+ shorthand: f
+ value_type: string
+ description: 'Format the output. Values: [pretty | json]. (Default: pretty)'
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: short
+ value_type: bool
+ default_value: "false"
+ description: Shows only Compose's version number
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
deprecated: false
+hidden: false
experimental: false
experimentalcli: false
kubernetes: false
diff --git a/docs/reference/docker_compose_volumes.yaml b/docs/reference/docker_compose_volumes.yaml
new file mode 100644
index 00000000000..20516db7f13
--- /dev/null
+++ b/docs/reference/docker_compose_volumes.yaml
@@ -0,0 +1,52 @@
+command: docker compose volumes
+short: List volumes
+long: List volumes
+usage: docker compose volumes [OPTIONS] [SERVICE...]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: format
+ value_type: string
+ default_value: table
+ description: |-
+ Format output using a custom template:
+ 'table': Print output in table format with column headers (default)
+ 'table TEMPLATE': Print output in table format using the given Go template
+ 'json': Print in JSON format
+ 'TEMPLATE': Print output using the given Go template.
+ Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ shorthand: q
+ value_type: bool
+ default_value: "false"
+ description: Only display volume names
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_wait.yaml b/docs/reference/docker_compose_wait.yaml
new file mode 100644
index 00000000000..5d8f3013cc0
--- /dev/null
+++ b/docs/reference/docker_compose_wait.yaml
@@ -0,0 +1,35 @@
+command: docker compose wait
+short: Block until containers of all (or specified) services stop.
+long: Block until containers of all (or specified) services stop.
+usage: docker compose wait SERVICE [SERVICE...] [OPTIONS]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: down-project
+ value_type: bool
+ default_value: "false"
+ description: Drops project when the first container stops
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/reference/docker_compose_watch.yaml b/docs/reference/docker_compose_watch.yaml
new file mode 100644
index 00000000000..a3e3e802201
--- /dev/null
+++ b/docs/reference/docker_compose_watch.yaml
@@ -0,0 +1,57 @@
+command: docker compose watch
+short: |
+ Watch build context for service and rebuild/refresh containers when files are updated
+long: |
+ Watch build context for service and rebuild/refresh containers when files are updated
+usage: docker compose watch [SERVICE...]
+pname: docker compose
+plink: docker_compose.yaml
+options:
+ - option: no-up
+ value_type: bool
+ default_value: "false"
+ description: Do not build & start services before watching
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: prune
+ value_type: bool
+ default_value: "true"
+ description: Prune dangling images on rebuild
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+ - option: quiet
+ value_type: bool
+ default_value: "false"
+ description: hide build output
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+inherited_options:
+ - option: dry-run
+ value_type: bool
+ default_value: "false"
+ description: Execute command in dry run mode
+ deprecated: false
+ hidden: false
+ experimental: false
+ experimentalcli: false
+ kubernetes: false
+ swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/docs/sdk.md b/docs/sdk.md
new file mode 100644
index 00000000000..3490a8198a1
--- /dev/null
+++ b/docs/sdk.md
@@ -0,0 +1,157 @@
+# Using the `docker/compose` SDK
+
+The `docker/compose` package can be used as a Go library by third-party applications to programmatically manage
+containerized applications defined in Compose files. This SDK provides a comprehensive API that lets you
+integrate Compose functionality directly into your applications, allowing you to load, validate, and manage
+multi-container environments without relying on the Compose CLI.
+
+Whether you need to orchestrate containers as part of
+a deployment pipeline, build custom management tools, or embed container orchestration into your application, the
+Compose SDK offers the same powerful capabilities that drive the Docker Compose command-line tool.
+
+## Set up the SDK
+
+To get started, create an SDK instance using the `NewComposeService()` function, which initializes a service with the
+necessary configuration to interact with the Docker daemon and manage Compose projects. This service instance provides
+methods for all core Compose operations including creating, starting, stopping, and removing containers, as well as
+loading and validating Compose files. The service handles the underlying Docker API interactions and resource
+management, allowing you to focus on your application logic.
+
+## Example usage
+
+Here's a basic example demonstrating how to load a Compose project and start the services:
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/flags"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose"
+)
+
+func main() {
+ ctx := context.Background()
+
+ dockerCLI, err := command.NewDockerCli()
+ if err != nil {
+ log.Fatalf("Failed to create docker CLI: %v", err)
+ }
+ err = dockerCLI.Initialize(flags.ClientOptions{})
+ if err != nil {
+ log.Fatalf("Failed to initialize docker CLI: %v", err)
+ }
+
+ // Create a new Compose service instance
+ service, err := compose.NewComposeService(dockerCLI)
+ if err != nil {
+ log.Fatalf("Failed to create compose service: %v", err)
+ }
+
+ // Load the Compose project from a compose file
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{"compose.yaml"},
+ ProjectName: "my-app",
+ })
+ if err != nil {
+ log.Fatalf("Failed to load project: %v", err)
+ }
+
+ // Start the services defined in the Compose file
+ err = service.Up(ctx, project, api.UpOptions{
+ Create: api.CreateOptions{},
+ Start: api.StartOptions{},
+ })
+ if err != nil {
+ log.Fatalf("Failed to start services: %v", err)
+ }
+
+ log.Printf("Successfully started project: %s", project.Name)
+}
+```
+
+This example demonstrates the core workflow - creating a service instance, loading a project from a Compose file, and
+starting the services. The SDK provides many additional operations for managing the lifecycle of your containerized
+application.
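+
+For instance, tearing the same project back down could look like the sketch below. It reuses the `service`, `ctx`,
+and `project` values from the example above; the `Down` method and `api.DownOptions` (with its `RemoveOrphans`
+field) are assumed here by analogy with the `Up` call and the Compose v2 API, so check the `api` package for the
+exact signature:
+
+```go
+	// Stop and remove the containers and networks created for the project,
+	// including containers for services no longer defined in the Compose file.
+	if err := service.Down(ctx, project.Name, api.DownOptions{
+		RemoveOrphans: true,
+	}); err != nil {
+		log.Fatalf("Failed to stop services: %v", err)
+	}
+```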
+
+## Customizing the SDK
+
+The `NewComposeService()` function accepts optional `compose.Option` parameters to customize the SDK behavior. These
+options allow you to configure I/O streams, concurrency limits, dry-run mode, and other advanced features.
+
+```go
+ // Create a custom output buffer to capture logs
+ var outputBuffer bytes.Buffer
+
+ // Create a compose service with custom options
+ service, err := compose.NewComposeService(dockerCLI,
+ compose.WithOutputStream(&outputBuffer), // Redirect output to custom writer
+ compose.WithErrorStream(os.Stderr), // Use stderr for errors
+ compose.WithMaxConcurrency(4), // Limit concurrent operations
+ compose.WithPrompt(compose.AlwaysOkPrompt()), // Auto-confirm all prompts
+ )
+```
+
+### Available options
+
+- `WithOutputStream(io.Writer)` - Redirect standard output to a custom writer
+- `WithErrorStream(io.Writer)` - Redirect error output to a custom writer
+- `WithInputStream(io.Reader)` - Provide a custom input stream for interactive prompts
+- `WithStreams(out, err, in)` - Set all I/O streams at once
+- `WithMaxConcurrency(int)` - Limit the number of concurrent operations against the Docker API
+- `WithPrompt(Prompt)` - Customize user confirmation behavior (use `AlwaysOkPrompt()` for non-interactive mode)
+- `WithDryRun` - Run operations in dry-run mode without actually applying changes
+- `WithContextInfo(api.ContextInfo)` - Set custom Docker context information
+- `WithProxyConfig(map[string]string)` - Configure HTTP proxy settings for builds
+- `WithEventProcessor(progress.EventProcessor)` - Receive progress events and operation notifications
+
+These options provide fine-grained control over the SDK's behavior, making it suitable for various integration
+scenarios including CLI tools, web services, automation scripts, and testing environments.
+
+## Tracking operations with `EventProcessor`
+
+The `EventProcessor` interface allows you to monitor Compose operations in real time by receiving events about changes
+applied to Docker resources such as images, containers, volumes, and networks. This is particularly useful for building
+user interfaces, logging systems, or monitoring tools that need to track the progress of Compose operations.
+
+### Understanding `EventProcessor`
+
+A Compose operation, such as `up`, `down`, or `build`, performs a series of changes to Docker resources. The
+`EventProcessor` receives notifications about these changes through three key methods:
+
+- `Start(ctx, operation)` - Called when a Compose operation begins, for example `up`
+- `On(events...)` - Called with progress events for individual resource changes, for example, container starting, image
+ being pulled
+- `Done(operation, success)` - Called when the operation completes, indicating success or failure
+
+Each event contains information about the resource being modified, its current status, and progress indicators when
+applicable (such as download progress for image pulls).
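+
+As a rough illustration, a custom processor can simply log each callback. The sketch below is based only on the
+method descriptions above: the parameter types (plain strings for the operation, a `progress.Event` value for each
+event) and the imports it relies on (`context`, `log`, and the Compose `progress` package) are assumptions, so check
+the `progress` package for the exact interface definition.
+
+```go
+// loggingProcessor is a hypothetical EventProcessor that writes every
+// callback to the standard logger.
+type loggingProcessor struct{}
+
+// Start is called when a Compose operation (for example "up") begins.
+func (p *loggingProcessor) Start(ctx context.Context, operation string) {
+	log.Printf("compose %s: started", operation)
+}
+
+// On receives progress events for individual resource changes.
+func (p *loggingProcessor) On(events ...progress.Event) {
+	for _, e := range events {
+		log.Printf("compose event: %+v", e)
+	}
+}
+
+// Done is called once the operation completes, with its success status.
+func (p *loggingProcessor) Done(operation string, success bool) {
+	log.Printf("compose %s: done (success=%t)", operation, success)
+}
+```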
+
+### Event status types
+
+Events report resource changes with the following status types:
+
+- Working - Operation is in progress, for example, creating, starting, pulling
+- Done - Operation completed successfully
+- Warning - Operation completed with warnings
+- Error - Operation failed
+
+Common status text values include: `Creating`, `Created`, `Starting`, `Started`, `Running`, `Stopping`, `Stopped`,
+`Removing`, `Removed`, `Building`, `Built`, `Pulling`, `Pulled`, and more.
+
+### Built-in `EventProcessor` implementations
+
+The SDK provides four ready-to-use `EventProcessor` implementations:
+
+- `progress.NewTTYWriter(io.Writer)` - Renders an interactive terminal UI with progress bars and task lists
+ (similar to the Docker Compose CLI output)
+- `progress.NewPlainWriter(io.Writer)` - Outputs simple text-based progress messages suitable for non-interactive
+ environments or log files
+- `progress.NewJSONWriter()` - Renders events as JSON objects
+- `progress.NewQuietWriter()` - (Default) Silently processes events without producing any output
+
+By implementing a custom `EventProcessor`, you can plug your own UI into `docker/compose`.
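+
+For example, to capture plain-text progress in a log file instead of discarding it (the default quiet behavior),
+pass one of the built-in writers when constructing the service. This sketch reuses only names mentioned in this
+document (`compose.NewComposeService`, `compose.WithEventProcessor`, `progress.NewPlainWriter`) and assumes a
+`dockerCLI` set up as in the first example:
+
+```go
+	// Write plain-text progress messages to a file rather than discarding them.
+	logFile, err := os.Create("compose-progress.log")
+	if err != nil {
+		log.Fatalf("Failed to create log file: %v", err)
+	}
+	defer logFile.Close()
+
+	service, err := compose.NewComposeService(dockerCLI,
+		compose.WithEventProcessor(progress.NewPlainWriter(logFile)),
+	)
+	if err != nil {
+		log.Fatalf("Failed to create compose service: %v", err)
+	}
+```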
diff --git a/docs/yaml/main/generate.go b/docs/yaml/main/generate.go
index 90ac25cf137..272727b0086 100644
--- a/docs/yaml/main/generate.go
+++ b/docs/yaml/main/generate.go
@@ -23,7 +23,7 @@ import (
clidocstool "github.com/docker/cli-docs-tool"
"github.com/docker/cli/cli/command"
- "github.com/docker/compose/v2/cmd/compose"
+ "github.com/docker/compose/v5/cmd/compose"
"github.com/spf13/cobra"
)
@@ -48,7 +48,21 @@ func generateDocs(opts *options) error {
if err != nil {
return err
}
- return tool.GenAllTree()
+ for _, format := range opts.formats {
+ switch format {
+ case "yaml":
+ if err := tool.GenYamlTree(cmd); err != nil {
+ return err
+ }
+ case "md":
+ if err := tool.GenMarkdownTree(cmd); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unknown format %q", format)
+ }
+ }
+ return nil
}
func disableFlagsInUseLine(cmd *cobra.Command) {
@@ -69,15 +83,17 @@ func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
}
type options struct {
- source string
- target string
+ source string
+ target string
+ formats []string
}
func main() {
cwd, _ := os.Getwd()
opts := &options{
- source: filepath.Join(cwd, "docs", "reference"),
- target: filepath.Join(cwd, "docs", "reference"),
+ source: filepath.Join(cwd, "docs", "reference"),
+ target: filepath.Join(cwd, "docs", "reference"),
+ formats: []string{"yaml", "md"},
}
fmt.Printf("Project root: %s\n", opts.source)
fmt.Printf("Generating yaml files into %s\n", opts.target)
diff --git a/go.mod b/go.mod
index 65ea1f49b2b..af5d5a7ad23 100644
--- a/go.mod
+++ b/go.mod
@@ -1,147 +1,165 @@
-module github.com/docker/compose/v2
+module github.com/docker/compose/v5
-go 1.17
+go 1.24.9
require (
- github.com/AlecAivazis/survey/v2 v2.3.2
+ github.com/AlecAivazis/survey/v2 v2.3.7
+ github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e
+ github.com/Microsoft/go-winio v0.6.2
+ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/buger/goterm v1.0.4
- github.com/cnabio/cnab-to-oci v0.3.1-beta1
- github.com/compose-spec/compose-go v1.2.2
- github.com/containerd/console v1.0.3
- github.com/containerd/containerd v1.6.2
- github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e
- github.com/docker/buildx v0.8.1 // when updating, also update the replace rules accordingly
- github.com/docker/cli v20.10.12+incompatible
- github.com/docker/cli-docs-tool v0.4.0
- github.com/docker/docker v20.10.7+incompatible
- github.com/docker/go-connections v0.4.0
- github.com/docker/go-units v0.4.0
- github.com/golang/mock v1.6.0
- github.com/hashicorp/go-multierror v1.1.1
- github.com/hashicorp/go-version v1.3.0
- github.com/mattn/go-isatty v0.0.14
+ github.com/compose-spec/compose-go/v2 v2.9.1
+ github.com/containerd/console v1.0.5
+ github.com/containerd/containerd/v2 v2.2.0
+ github.com/containerd/errdefs v1.0.0
+ github.com/containerd/platforms v1.0.0-rc.2
+ github.com/distribution/reference v0.6.0
+ github.com/docker/buildx v0.29.1
+ github.com/docker/cli v28.5.2+incompatible
+ github.com/docker/cli-docs-tool v0.10.0
+ github.com/docker/docker v28.5.2+incompatible
+ github.com/docker/go-connections v0.6.0
+ github.com/docker/go-units v0.5.0
+ github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203
+ github.com/fsnotify/fsevents v0.2.0
+ github.com/go-viper/mapstructure/v2 v2.4.0
+ github.com/google/go-cmp v0.7.0
+ github.com/google/uuid v1.6.0
+ github.com/hashicorp/go-version v1.7.0
+ github.com/jonboulle/clockwork v0.5.0
github.com/mattn/go-shellwords v1.0.12
- github.com/moby/buildkit v0.10.0-rc2.0.20220308185020-fdecd0ae108b
- github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
+ github.com/mitchellh/go-ps v1.0.0
+ github.com/moby/buildkit v0.25.2
+ github.com/moby/go-archive v0.1.0
+ github.com/moby/patternmatcher v0.6.0
+ github.com/moby/sys/atomicwriter v0.1.0
+ github.com/moby/term v0.5.2
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.2
- github.com/pkg/errors v0.9.1
- github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b
- github.com/sirupsen/logrus v1.8.1
- github.com/spf13/cobra v1.4.0
- github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.7.0
- github.com/theupdateframework/notary v0.6.1
- golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- gotest.tools v2.2.0+incompatible
- gotest.tools/v3 v3.1.0
+ github.com/opencontainers/image-spec v1.1.1
+ github.com/otiai10/copy v1.14.1
+ github.com/sirupsen/logrus v1.9.3
+ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
+ github.com/spf13/cobra v1.10.1
+ github.com/spf13/pflag v1.0.10
+ github.com/stretchr/testify v1.11.1
+ github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
+ go.opentelemetry.io/otel v1.37.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
+ go.opentelemetry.io/otel/metric v1.37.0
+ go.opentelemetry.io/otel/sdk v1.37.0
+ go.opentelemetry.io/otel/trace v1.37.0
+ go.uber.org/goleak v1.3.0
+ go.uber.org/mock v0.6.0
+ golang.org/x/sync v0.18.0
+ golang.org/x/sys v0.38.0
+ google.golang.org/grpc v1.76.0
+ gopkg.in/yaml.v3 v3.0.1
+ gotest.tools/v3 v3.5.2
+ tags.cncf.io/container-device-interface v1.0.1
)
require (
- github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
- github.com/Masterminds/semver v1.5.0 // indirect
- github.com/Microsoft/go-winio v0.5.1 // indirect
- github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/cnabio/cnab-go v0.10.0-beta1 // indirect
- github.com/containerd/continuity v0.2.2 // indirect
- github.com/containerd/ttrpc v1.1.0 // indirect
- github.com/containerd/typeurl v1.0.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/containerd/containerd/api v1.10.0 // indirect
+ github.com/containerd/continuity v0.4.5 // indirect
+ github.com/containerd/errdefs/pkg v0.3.0 // indirect
+ github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/ttrpc v1.2.7 // indirect
+ github.com/containerd/typeurl/v2 v2.2.3 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/distribution v2.8.0+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.6.4 // indirect
+ github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
- github.com/felixge/httpsnoop v1.0.2 // indirect
- github.com/fvbommel/sortorder v1.0.1 // indirect
- github.com/go-logr/logr v1.2.2 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fvbommel/sortorder v1.1.0 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/gofrs/flock v0.8.0 // indirect
- github.com/gogo/googleapis v1.4.1 // indirect
+ github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/go-cmp v0.5.7 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
- github.com/gorilla/mux v1.8.0 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+ github.com/gorilla/mux v1.8.1 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/json-iterator/go v1.1.12 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/in-toto/in-toto-golang v0.9.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
- github.com/klauspost/compress v1.15.0 // indirect
- github.com/mattn/go-colorable v0.1.12 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/klauspost/compress v1.18.1 // indirect
+ github.com/magiconair/properties v1.8.9 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
- github.com/miekg/pkcs11 v1.0.3 // indirect
- github.com/mitchellh/mapstructure v1.4.3 // indirect
+ github.com/miekg/pkcs11 v1.1.1 // indirect
+ github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
- github.com/moby/sys/signal v0.6.0 // indirect
- github.com/moby/sys/symlink v0.2.0 // indirect
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/opencontainers/runc v1.1.0 // indirect
- github.com/pelletier/go-toml v1.9.4 // indirect
+ github.com/moby/sys/capability v0.4.0 // indirect
+ github.com/moby/sys/sequential v0.6.0 // indirect
+ github.com/moby/sys/signal v0.7.1 // indirect
+ github.com/moby/sys/symlink v0.3.0 // indirect
+ github.com/moby/sys/user v0.4.0 // indirect
+ github.com/moby/sys/userns v0.1.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/otiai10/mint v1.6.3 // indirect
+ github.com/pelletier/go-toml v1.9.5 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.12.1 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.32.1 // indirect
- github.com/prometheus/procfs v0.7.3 // indirect
- github.com/qri-io/jsonpointer v0.1.0 // indirect
- github.com/qri-io/jsonschema v0.1.1 // indirect
- github.com/sergi/go-diff v1.1.0 // indirect
- github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect
+ github.com/shibumi/go-pathspec v1.3.0 // indirect
+ github.com/theupdateframework/notary v0.7.0 // indirect
+ github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 // indirect
+ github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f // indirect
+ github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
- github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
- github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
- github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
- github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
- go.opentelemetry.io/otel v1.4.1 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
- go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
- go.opentelemetry.io/otel/metric v0.27.0 // indirect
- go.opentelemetry.io/otel/sdk v1.4.1 // indirect
- go.opentelemetry.io/otel/trace v1.4.1 // indirect
- go.opentelemetry.io/proto/otlp v0.12.0 // indirect
- golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
- golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
- golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
- golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
- golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
- golang.org/x/text v0.3.7 // indirect
- golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
- google.golang.org/grpc v1.44.0 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
- gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/apimachinery v0.23.4 // indirect; see replace for the actual version used
- k8s.io/client-go v0.23.4 // indirect; see replace for the actual version used
- k8s.io/klog/v2 v2.30.0 // indirect
- k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
- sigs.k8s.io/yaml v1.2.0 // indirect
+ github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect
+ github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/crypto v0.41.0 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/term v0.34.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ golang.org/x/time v0.14.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
)
-replace (
- github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220309205733-2b52f62e9627+incompatible
- github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible
-
- // For k8s dependencies, we use a replace directive, to prevent them being
- // upgraded to the version specified in containerd, which is not relevant to the
- // version needed.
- // See https://github.com/docker/buildx/pull/948 for details.
- // https://github.com/docker/buildx/blob/v0.8.1/go.mod#L62-L64
- k8s.io/api => k8s.io/api v0.22.4
- k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
- k8s.io/client-go => k8s.io/client-go v0.22.4
+exclude (
+ // FIXME(thaJeztah): remove this once kubernetes has updated their dependencies to no longer need this.
+ //
+ // For additional details, see this PR and links mentioned in that PR:
+ // https://github.com/kubernetes-sigs/kustomize/pull/5830#issuecomment-2569960859
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
)
diff --git a/go.sum b/go.sum
index c3a8716528b..2aa59644bf5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2249 +1,568 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
-bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4=
-cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA=
-cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
-contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
-contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
-contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
-contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
-github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8=
-github.com/AlecAivazis/survey/v2 v2.3.2 h1:TqTB+aDDCLYhf9/bD2TwSO8u8jDSmMUd2SUVO4gCnU8=
-github.com/AlecAivazis/survey/v2 v2.3.2/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg=
-github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
-github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v12.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
-github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
-github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
-github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
-github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
-github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
+github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
-github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
-github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
-github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3HV0OhsddkmM=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
-github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
-github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
-github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=
-github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
-github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
-github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
+github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e h1:rd4bOvKmDIx0WeTv9Qz+hghsgyjikFiPrseXHlKepO0=
+github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e/go.mod h1:blbwPQh4DTlCZEfk1BLU4oMIhLda2U+A840Uag9DsZw=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.14.0-rc.1 h1:qAPXKwGOkVn8LlqgBN8GS0bxZ83hOJpcjxzmlQKxKsQ=
+github.com/Microsoft/hcsshim v0.14.0-rc.1/go.mod h1:hTKFGbnDtQb1wHiOWv4v0eN+7boSWAHyK/tNAaYZL0c=
+github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
+github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
+github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
+github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ=
-github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
-github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
-github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
-github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
-github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
-github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
-github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
-github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
-github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
-github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
+github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
-github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg=
-github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
-github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
-github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY=
github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/bugsnag-go v1.4.1/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/bugsnag-go v1.5.0 h1:tP8hiPv1pGGW3LA6LKy5lW6WG+y9J2xWUdPd3WC452k=
-github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 h1:s7+5BfS4WFJoVF9pnB8kBk03S7pZXRdKamnV0FOl5Sc=
+github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=
-github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
-github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
-github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
-github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY=
-github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
-github.com/cloudflare/cfssl v1.4.1 h1:vScfU2DrIUI9VPHBVeeAQ0q5A+9yshO1Gz+3QoUQiKw=
-github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo=
-github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4=
-github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
-github.com/cnabio/cnab-go v0.10.0-beta1 h1:5LEEODVQkyCHfeT6pggPz5zq/PinA/CzlNrChFkfGkg=
-github.com/cnabio/cnab-go v0.10.0-beta1/go.mod h1:5c4uOP6ZppR4nUGtCMAElscRiYEUi44vNQwtSAvISXk=
-github.com/cnabio/cnab-to-oci v0.3.1-beta1 h1:qAuLRt+2J7U7wIB5YG+COtS630NQCf4G1h1p0Yk6llo=
-github.com/cnabio/cnab-to-oci v0.3.1-beta1/go.mod h1:8BomA5Vye+3V/Kd2NSFblCBmp1rJV5NfXBYKbIGT5Rw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/compose-spec/compose-go v1.0.8/go.mod h1:REnCbBugoIdHB7S1sfkN/aJ7AJpNApGNjNiVjA9L8x4=
-github.com/compose-spec/compose-go v1.2.2 h1:y1dwl3KUTBnWPVur6EZno9zUIum6Q87/F5keljnGQB4=
-github.com/compose-spec/compose-go v1.2.2/go.mod h1:pAy7Mikpeft4pxkFU565/DRHEbDfR84G6AQuiL+Hdg8=
-github.com/compose-spec/godotenv v1.1.1/go.mod h1:zF/3BOa18Z24tts5qnO/E9YURQanJTBUf7nlcCTNsyc=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
-github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
-github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
-github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU=
-github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s=
-github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
-github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
-github.com/containerd/stargz-snapshotter v0.11.2/go.mod h1:HfhsbZ98KIoqA2GLmibTpRwMF/lq3utZ0ElV9ARqU7M=
-github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
-github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
+github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/compose-spec/compose-go/v2 v2.9.1 h1:8UwI+ujNU+9Ffkf/YgAm/qM9/eU7Jn8nHzWG721W4rs=
+github.com/compose-spec/compose-go/v2 v2.9.1/go.mod h1:Oky9AZGTRB4E+0VbTPZTUu4Kp+oEMMuwZXZtPPVT1iE=
+github.com/containerd/cgroups/v3 v3.1.0 h1:azxYVj+91ZgSnIBp2eI3k9y2iYQSR/ZQIgh9vKO+HSY=
+github.com/containerd/cgroups/v3 v3.1.0/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
+github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
+github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
+github.com/containerd/containerd/api v1.10.0 h1:5n0oHYVBwN4VhoX9fFykCV9dF1/BvAXeg2F8W6UYq1o=
+github.com/containerd/containerd/api v1.10.0/go.mod h1:NBm1OAk8ZL+LG8R0ceObGxT5hbUYj7CzTmR3xh0DlMM=
+github.com/containerd/containerd/v2 v2.2.0 h1:K7TqcXy+LnFmZaui2DgHsnp2gAHhVNWYaHlx7HXfys8=
+github.com/containerd/containerd/v2 v2.2.0/go.mod h1:YCMjKjA4ZA7egdHNi3/93bJR1+2oniYlnS+c0N62HdE=
+github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
+github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/nydus-snapshotter v0.15.2 h1:qsHI4M+Wwrf6Jr4eBqhNx8qh+YU0dSiJ+WPmcLFWNcg=
+github.com/containerd/nydus-snapshotter v0.15.2/go.mod h1:FfwH2KBkNYoisK/e+KsmNr7xTU53DmnavQHMFOcXwfM=
+github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6acgLGv/QzE4=
+github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4=
+github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y=
+github.com/containerd/plugin v1.0.0/go.mod h1:hQfJe5nmWfImiqT1q8Si3jLv3ynMUIBB47bQ+KexvO8=
+github.com/containerd/stargz-snapshotter v0.16.3 h1:zbQMm8dRuPHEOD4OqAYGajJJUwCeUzt4j7w9Iaw58u4=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ=
+github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
-github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
+github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e h1:n81KvOMrLZa+VWHwST7dun9f0G98X3zREHS1ztYzZKU=
-github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e/go.mod h1:xpWTC2KnJMiDLkoawhsPQcXjvwATEBcbq0xevG2YR9M=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/buildx v0.8.1 h1:XcVCxhDkUI8QvkBYAoCl3gcHUGFc03jZDzKdh8IJmPs=
-github.com/docker/buildx v0.8.1/go.mod h1:9v3laulRPwglXHuPulXa5onXQPlgrIq6LnCXc7SgHJw=
-github.com/docker/cli v20.10.3-0.20220309205733-2b52f62e9627+incompatible h1:RWXvuBczWuSIMjI69AnkNklNNVX2gmS0X+15AttGDVk=
-github.com/docker/cli v20.10.3-0.20220309205733-2b52f62e9627+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli-docs-tool v0.4.0 h1:MdfKoErGEbFqIxQ8an9BsZ+YzKUGd58RBVkV+Q82GPo=
-github.com/docker/cli-docs-tool v0.4.0/go.mod h1:rgW5KKdNpLMBIuH4WQ/1RNh38nH+/Ay5jgL4P0ZMPpY=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/buildx v0.29.1 h1:58hxM5Z4mnNje3G5NKfULT9xCr8ooM8XFtlfUK9bKaA=
+github.com/docker/buildx v0.29.1/go.mod h1:J4EFv6oxlPiV1MjO0VyJx2u5tLM7ImDEl9zyB8d4wPI=
+github.com/docker/cli v28.5.2+incompatible h1:XmG99IHcBmIAoC1PPg9eLBZPlTrNUAijsHLm8PjhBlg=
+github.com/docker/cli v28.5.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli-docs-tool v0.10.0 h1:bOD6mKynPQgojQi3s2jgcUWGp/Ebqy1SeCr9VfKQLLU=
+github.com/docker/cli-docs-tool v0.10.0/go.mod h1:5EM5zPnT2E7yCLERZmrDA234Vwn09fzRHP4aX1qwp1U=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
-github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible h1:bL4hLpxukr5Ls3bzYrn3LCYIwML+XXCktZHaGBIN3og=
-github.com/docker/docker v20.10.3-0.20220309172631-83b51522df43+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
-github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
-github.com/docker/go v1.5.1-1/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
+github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
+github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
-github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
+github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 h1:XBBHcIb256gUJtLmY22n99HaZTz+r2Z51xUPi01m3wg=
+github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203/go.mod h1:E1jcSv8FaEny+OP/5k9UxZVw9YFWGj7eI4KR/iOBqCg=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
-github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsevents v0.2.0 h1:BRlvlqjvNTfogHfeBOFvSC9N0Ddy+wzQCQukyoD7o/c=
+github.com/fsnotify/fsevents v0.2.0/go.mod h1:B3eEk39i4hz8y1zaWS/wPrAP4O6wkIl7HQwKBr1qH/w=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=
-github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
-github.com/go-critic/go-critic v0.4.3/go.mod h1:j4O3D4RoIwRqlZw5jJpx0BNfXWWbpcJoKu5cYSe4YmQ=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=
+github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-sql-driver/mysql v1.3.0 h1:pgwjLi/dvffoP9aabwkT3AKpXQM93QARkjFhDDqC1UE=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
-github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
-github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
-github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
-github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
+github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.23.7/go.mod h1:g/38bxfhp4rI7zeWSxcdIeHTQGS58TCak8FYcyCmavQ=
-github.com/golangci/golangci-lint v1.27.0/go.mod h1:+eZALfxIuthdrHPtfM7w/R3POJLjHDfJJw8XZl9xOng=
-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
-github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
-github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
-github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
+github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
-github.com/google/go-containerregistry v0.0.0-20191015185424-71da34e4d9b3/go.mod h1:ZXFeSndFcK4vB1NR4voH1Zm38K7ViUNiYtfIBDxrwf0=
-github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4=
-github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
-github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
-github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
-github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
-github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/goreleaser/goreleaser v0.136.0/go.mod h1:wiKrPUeSNh6Wu8nUHxZydSOVQ/OZvOaO7DTtFqie904=
-github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w=
-github.com/goreleaser/nfpm v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoTq5UCb40=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
-github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
-github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY=
-github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw=
-github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
-github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=
-github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
+github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
-github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
-github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg=
-github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
-github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
-github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
-github.com/jinzhu/gorm v1.9.11 h1:gaHGvE+UnWGlbWG4Y3FUwY1EcZ5n6S9WtqBA/uySMLE=
-github.com/jinzhu/gorm v1.9.11/go.mod h1:bu/pK8szGZ2puuErfU0RwyeNdsf3e6nCX/noXaVxkfw=
-github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
-github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU=
-github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s=
+github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4=
+github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE=
+github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
+github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc=
+github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
+github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
-github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
+github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
-github.com/moby/buildkit v0.10.0-rc2.0.20220308185020-fdecd0ae108b h1:plbnJxjht8Z6D3c/ga79D1+VaA/IUfNVp08J3lcDgI8=
-github.com/moby/buildkit v0.10.0-rc2.0.20220308185020-fdecd0ae108b/go.mod h1:WvwAZv8aRScHkqc/+X46cRC2CKMKpqcaX+pRvUTtPes=
+github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/buildkit v0.25.2 h1:mReLKDPv05cqk6o/u3ixq2/iTsWGHoUO5Zg3lojrQTk=
+github.com/moby/buildkit v0.25.2/go.mod h1:phM8sdqnvgK2y1dPDnbwI6veUCXHOZ6KFSl6E164tkc=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
+github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
-github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
-github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
-github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk=
-github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
-github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
-github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
-github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
-github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
+github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
+github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
+github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
+github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
+github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
+github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
+github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
+github.com/moby/sys/symlink v0.3.0 h1:GZX89mEZ9u53f97npBy4Rc3vJKj7JBDj/PN2I22GrNU=
+github.com/moby/sys/symlink v0.3.0/go.mod h1:3eNdhduHmYPcgsJtZXW1W4XUJdZGBIkttZ8xKqPUJq0=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
-github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
-github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
-github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
+github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
+github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pivotal/image-relocation v0.0.0-20191111101224-e94aff6df06c/go.mod h1:/JNbQwGylYm3AQh8q+MBF8e/h0W1Jy20JGTvozuXYTE=
+github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
+github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
+github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
+github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/qri-io/jsonpointer v0.1.0 h1:OcTtTmorodUCRc2CZhj/ZwOET8zVj6uo0ArEmzoThZI=
-github.com/qri-io/jsonpointer v0.1.0/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64=
-github.com/qri-io/jsonschema v0.1.1 h1:t//Doa/gvMqJ0bDhG7PGIKfaWGGxRVaffp+bcvBGGEk=
-github.com/qri-io/jsonschema v0.1.1/go.mod h1:QpzJ6gBQ0GYgGmh7mDQ1YsvvhSgE4rYj0k8t5MBOmUY=
-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
-github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE=
-github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA=
-github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY=
-github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
-github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE=
-github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A=
-github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
+github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA=
+github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
-github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
-github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk=
+github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE=
+github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk=
+github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
+github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE=
+github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c h1:2EejZtjFjKJGk71ANb+wtFK5EjUzUkEM3R0xnp559xg=
+github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
-github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
-github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0=
-github.com/tetafro/godot v0.4.2/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0=
-github.com/theupdateframework/notary v0.6.1 h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=
-github.com/theupdateframework/notary v0.6.1/go.mod h1:MOfgIfmox8s7/7fduvB2xyPPMJCrjRLRizA8OFwpnKY=
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
-github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
-github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
-github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
-github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
-github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo=
-github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
-github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 h1:T1pEe+WB3SCPVAfVquvfPfagKZU2Z8c1OP3SuGB+id0=
-github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
-github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg=
-github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
+github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
+github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
+github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
+github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 h1:r0p7fK56l8WPequOaR3i9LBqfPtEdXIQbUTzT55iqT4=
+github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY=
+github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI=
+github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f/go.mod h1:BKdcez7BiVtBvIcef90ZPc6ebqIWr4JWD7+EvLm6J98=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
-github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f h1:DLpt6B5oaaS8jyXHa9VA4rrZloBVPVXeCtrOsrFauxc=
-github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
-github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
-github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
-github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
-github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
-github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
-github.com/weppos/publicsuffix-go v0.5.0 h1:rutRtjBJViU/YjcI5d80t4JAVvDltS6bciJg2K1HrLU=
-github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
-github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw=
+github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
+github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
+github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
-github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
-github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
-github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
-github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE=
-github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is=
-github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw=
-github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8=
-github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg=
-github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
-go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
-go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
-go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
-go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
-go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
-go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g=
-go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
-go.opentelemetry.io/otel/exporters/jaeger v1.4.1/go.mod h1:ZW7vkOu9nC1CxsD8bHNHCia5JUbwP39vxgd1q4Z5rCI=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M=
-go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk=
-go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ=
-go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
-go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y=
-go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
-go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
-go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ=
-go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
-go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
-go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
-gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
-golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
-golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190830141801-acfa387b8d69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191014205221-18e3458ac98b/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113232020-e2727e816f5a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200102140908-9497f49d5709/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204192400-7124308813f3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
-google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
-google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
-google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc=
+google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
+google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
+gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ=
-gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO4GY5a4pEaeEc=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
-gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
-gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
+gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
-gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
-grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
-k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
-k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
-k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
-k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
-k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
-k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
-k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U=
-k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
-k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
-k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
-k8s.io/cri-api v0.24.0-alpha.3/go.mod h1:c/NLI5Zdyup5+oEYqFO2IE32ptofNiZpS1nL2y51gAg=
-k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo=
-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
-k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
-k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
-k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
-k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
-mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc=
-pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=
-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4=
-vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
+tags.cncf.io/container-device-interface v1.0.1 h1:KqQDr4vIlxwfYh0Ed/uJGVgX+CHAkahrgabg6Q8GYxc=
+tags.cncf.io/container-device-interface v1.0.1/go.mod h1:JojJIOeW3hNbcnOH2q0NrWNha/JuHoDZcmYxAZwb2i0=
diff --git a/internal/desktop/client.go b/internal/desktop/client.go
new file mode 100644
index 00000000000..f46a6443cc3
--- /dev/null
+++ b/internal/desktop/client.go
@@ -0,0 +1,145 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package desktop
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+
+ "github.com/docker/compose/v5/internal"
+ "github.com/docker/compose/v5/internal/memnet"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+)
+
+// identify this client in the logs
+var userAgent = "compose/" + internal.Version
+
+// Client for integration with Docker Desktop features.
+type Client struct {
+ apiEndpoint string
+ client *http.Client
+}
+
+// NewClient creates a Desktop integration client for the provided in-memory
+// socket address (AF_UNIX or named pipe).
+func NewClient(apiEndpoint string) *Client {
+ var transport http.RoundTripper = &http.Transport{
+ DisableCompression: true,
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return memnet.DialEndpoint(ctx, apiEndpoint)
+ },
+ }
+ transport = otelhttp.NewTransport(transport)
+
+ return &Client{
+ apiEndpoint: apiEndpoint,
+ client: &http.Client{Transport: transport},
+ }
+}
+
+func (c *Client) Endpoint() string {
+ return c.apiEndpoint
+}
+
+// Close releases any open connections.
+func (c *Client) Close() error {
+ c.client.CloseIdleConnections()
+ return nil
+}
+
+type PingResponse struct {
+ ServerTime int64 `json:"serverTime"`
+}
+
+// Ping is a minimal API used to ensure that the server is available.
+func (c *Client) Ping(ctx context.Context) (*PingResponse, error) {
+ req, err := c.newRequest(ctx, http.MethodGet, "/ping", http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = resp.Body.Close()
+ }()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ var ret PingResponse
+ if err := json.NewDecoder(resp.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return &ret, nil
+}
+
+type FeatureFlagResponse map[string]FeatureFlagValue
+
+type FeatureFlagValue struct {
+ Enabled bool `json:"enabled"`
+}
+
+func (c *Client) FeatureFlags(ctx context.Context) (FeatureFlagResponse, error) {
+ req, err := c.newRequest(ctx, http.MethodGet, "/features", http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ _ = resp.Body.Close()
+ }()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ var ret FeatureFlagResponse
+ if err := json.NewDecoder(resp.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+func (c *Client) newRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, method, backendURL(path), body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ return req, nil
+}
+
+// backendURL generates a URL for the given API path.
+//
+// NOTE: the custom transport handles the actual communication; the host is only
+// used to build a valid URL for the Go http.Client and to keep errors/logs descriptive.
+func backendURL(path string) string {
+ return "http://docker-desktop/" + strings.TrimPrefix(path, "/")
+}
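For orientation, here is a minimal usage sketch of this client (not part of the diff). It assumes Docker Desktop exposes its backend on a Unix socket; the endpoint path is purely illustrative, and since the package is internal such code would live inside the compose module itself.

package main

import (
	"context"
	"fmt"

	"github.com/docker/compose/v5/internal/desktop"
)

func main() {
	// Hypothetical endpoint; in practice it is discovered via the Docker CLI / Desktop integration.
	client := desktop.NewClient("unix:///path/to/docker-desktop/backend.sock")
	defer func() { _ = client.Close() }()

	ping, err := client.Ping(context.Background())
	if err != nil {
		fmt.Println("Desktop backend not reachable:", err)
		return
	}
	fmt.Println("Desktop server time (ns):", ping.ServerTime)
}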
diff --git a/internal/desktop/client_test.go b/internal/desktop/client_test.go
new file mode 100644
index 00000000000..0355dd501a3
--- /dev/null
+++ b/internal/desktop/client_test.go
@@ -0,0 +1,52 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package desktop
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestClientPing(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipped in short mode - test connects to Docker Desktop")
+ }
+ desktopEndpoint := os.Getenv("COMPOSE_TEST_DESKTOP_ENDPOINT")
+ if desktopEndpoint == "" {
+ t.Skip("Skipping - COMPOSE_TEST_DESKTOP_ENDPOINT not defined")
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ client := NewClient(desktopEndpoint)
+ t.Cleanup(func() {
+ _ = client.Close()
+ })
+
+ now := time.Now()
+
+ ret, err := client.Ping(ctx)
+ require.NoError(t, err)
+
+ serverTime := time.Unix(0, ret.ServerTime)
+ require.True(t, now.Before(serverTime))
+}
diff --git a/internal/experimental/experimental.go b/internal/experimental/experimental.go
new file mode 100644
index 00000000000..d6378024307
--- /dev/null
+++ b/internal/experimental/experimental.go
@@ -0,0 +1,68 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package experimental
+
+import (
+ "context"
+ "os"
+ "strconv"
+
+ "github.com/docker/compose/v5/internal/desktop"
+)
+
+// envComposeExperimentalGlobal can be set to a falsy value (e.g. 0, false) to
+// globally opt out of all experimental features in Compose.
+const envComposeExperimentalGlobal = "COMPOSE_EXPERIMENTAL"
+
+// State of experiments (enabled/disabled) based on environment and local config.
+type State struct {
+ // active is false if experiments have been opted-out of globally.
+ active bool
+ desktopValues desktop.FeatureFlagResponse
+}
+
+func NewState() *State {
+ // experimental features have individual controls, but users can opt out
+ // of ALL experiments easily if desired
+ experimentsActive := true
+ if v := os.Getenv(envComposeExperimentalGlobal); v != "" {
+ experimentsActive, _ = strconv.ParseBool(v)
+ }
+ return &State{
+ active: experimentsActive,
+ }
+}
+
+func (s *State) Load(ctx context.Context, client *desktop.Client) error {
+ if !s.active {
+ // user opted out of experiments globally, no need to load state from
+ // Desktop
+ return nil
+ }
+
+ if client == nil {
+ // not running under Docker Desktop
+ return nil
+ }
+
+ desktopValues, err := client.FeatureFlags(ctx)
+ if err != nil {
+ return err
+ }
+ s.desktopValues = desktopValues
+ return nil
+}
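A rough sketch of how this state might be wired up by a caller inside the compose module; the function name is illustrative, and the desktop client may be nil when not running under Docker Desktop.

package compose

import (
	"context"

	"github.com/docker/compose/v5/internal/desktop"
	"github.com/docker/compose/v5/internal/experimental"
)

// resolveExperiments honors the COMPOSE_EXPERIMENTAL global opt-out, then
// hydrates per-feature flags from Docker Desktop when a client is available.
// desktopCli may be nil when not running under Docker Desktop.
func resolveExperiments(ctx context.Context, desktopCli *desktop.Client) (*experimental.State, error) {
	state := experimental.NewState()
	if err := state.Load(ctx, desktopCli); err != nil {
		return nil, err
	}
	return state, nil
}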
diff --git a/internal/locker/pidfile.go b/internal/locker/pidfile.go
new file mode 100644
index 00000000000..08dcea1f3a5
--- /dev/null
+++ b/internal/locker/pidfile.go
@@ -0,0 +1,35 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "fmt"
+ "path/filepath"
+)
+
+type Pidfile struct {
+ path string
+}
+
+func NewPidfile(projectName string) (*Pidfile, error) {
+ run, err := runDir()
+ if err != nil {
+ return nil, err
+ }
+ path := filepath.Join(run, fmt.Sprintf("%s.pid", projectName))
+ return &Pidfile{path: path}, nil
+}
diff --git a/internal/locker/pidfile_unix.go b/internal/locker/pidfile_unix.go
new file mode 100644
index 00000000000..484b65d8250
--- /dev/null
+++ b/internal/locker/pidfile_unix.go
@@ -0,0 +1,29 @@
+//go:build !windows
+
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/pidfile"
+)
+
+func (f *Pidfile) Lock() error {
+ return pidfile.Write(f.path, os.Getpid())
+}
diff --git a/internal/locker/pidfile_windows.go b/internal/locker/pidfile_windows.go
new file mode 100644
index 00000000000..adc151827dc
--- /dev/null
+++ b/internal/locker/pidfile_windows.go
@@ -0,0 +1,48 @@
+//go:build windows
+
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/pidfile"
+ "github.com/mitchellh/go-ps"
+)
+
+func (f *Pidfile) Lock() error {
+ newPID := os.Getpid()
+ err := pidfile.Write(f.path, newPID)
+ if err != nil {
+ // Get PID registered in the file
+ pid, errPid := pidfile.Read(f.path)
+ if errPid != nil {
+ return err
+ }
+		// Some users hit issues on Windows where the process recorded in the pidfile
+		// was reported as still running, so a second process library is used to verify
+		// that this is not a false positive.
+		// Check whether the process actually exists.
+ process, errPid := ps.FindProcess(pid)
+ if process == nil && errPid == nil {
+ // If the process does not exist, remove the pidfile and try to lock again
+ _ = os.Remove(f.path)
+ return pidfile.Write(f.path, newPID)
+ }
+ }
+ return err
+}
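For context, a sketch of how the pidfile lock is intended to be taken per project. The wrapper name and error handling are illustrative; as an internal package this would be called from within compose itself.

package compose

import (
	"fmt"

	"github.com/docker/compose/v5/internal/locker"
)

// lockProject takes the per-project pidfile lock before running a command.
func lockProject(projectName string) error {
	pf, err := locker.NewPidfile(projectName)
	if err != nil {
		return err
	}
	if err := pf.Lock(); err != nil {
		// Another Compose process already holds the lock for this project.
		return fmt.Errorf("project %q is locked by another process: %w", projectName, err)
	}
	return nil
}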
diff --git a/cmd/formatter/multierrformat.go b/internal/locker/runtime.go
similarity index 59%
rename from cmd/formatter/multierrformat.go
rename to internal/locker/runtime.go
index 28d538f5675..e60db5cc15e 100644
--- a/cmd/formatter/multierrformat.go
+++ b/internal/locker/runtime.go
@@ -14,25 +14,22 @@
limitations under the License.
*/
-package formatter
+package locker
import (
- "strings"
-
- "github.com/hashicorp/go-multierror"
+ "os"
)
-// SetMultiErrorFormat set cli default format for multi-errors
-func SetMultiErrorFormat(errs *multierror.Error) {
- if errs != nil {
- errs.ErrorFormat = formatErrors
+func runDir() (string, error) {
+ run, ok := os.LookupEnv("XDG_RUNTIME_DIR")
+ if ok {
+ return run, nil
}
-}
-func formatErrors(errs []error) string {
- messages := make([]string, len(errs))
- for i, err := range errs {
- messages[i] = "Error: " + err.Error()
+ path, err := osDependentRunDir()
+ if err != nil {
+ return "", err
}
- return strings.Join(messages, "\n")
+ err = os.MkdirAll(path, 0o700)
+ return path, err
}
diff --git a/internal/locker/runtime_darwin.go b/internal/locker/runtime_darwin.go
new file mode 100644
index 00000000000..127a5fb048d
--- /dev/null
+++ b/internal/locker/runtime_darwin.go
@@ -0,0 +1,34 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
+func osDependentRunDir() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, "Library", "Application Support", "com.docker.compose"), nil
+}
diff --git a/internal/locker/runtime_unix.go b/internal/locker/runtime_unix.go
new file mode 100644
index 00000000000..5daf61aac52
--- /dev/null
+++ b/internal/locker/runtime_unix.go
@@ -0,0 +1,43 @@
+//go:build linux || openbsd || freebsd
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
+func osDependentRunDir() (string, error) {
+	run := filepath.Join("/run", "user", strconv.Itoa(os.Getuid()))
+ if _, err := os.Stat(run); err == nil {
+ return run, nil
+ }
+
+ // /run/user/$uid is set by pam_systemd, but might not be present, especially in containerized environments
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, ".docker", "docker-compose"), nil
+}
diff --git a/internal/locker/runtime_windows.go b/internal/locker/runtime_windows.go
new file mode 100644
index 00000000000..4d4451b61c6
--- /dev/null
+++ b/internal/locker/runtime_windows.go
@@ -0,0 +1,49 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package locker
+
+import (
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/windows"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
+func osDependentRunDir() (string, error) {
+ flags := []uint32{windows.KF_FLAG_DEFAULT, windows.KF_FLAG_DEFAULT_PATH}
+ for _, flag := range flags {
+ p, _ := windows.KnownFolderPath(windows.FOLDERID_LocalAppData, flag|windows.KF_FLAG_DONT_VERIFY)
+ if p != "" {
+ return filepath.Join(p, "docker-compose"), nil
+ }
+ }
+
+ appData, ok := os.LookupEnv("LOCALAPPDATA")
+ if ok {
+ return filepath.Join(appData, "docker-compose"), nil
+ }
+
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, "AppData", "Local", "docker-compose"), nil
+}
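
Not part of the change itself, but for context: a minimal sketch of how the platform-specific run directory resolved above might be consumed inside the `locker` package. The `lockFilePathForProject` helper and the `.lock` naming are hypothetical; only `osDependentRunDir` comes from the files in this diff.

```go
package locker

import (
	"fmt"
	"os"
	"path/filepath"
)

// lockFilePathForProject is a hypothetical helper: it resolves the
// platform-specific run directory and returns a per-project lock file path.
func lockFilePathForProject(projectName string) (string, error) {
	dir, err := osDependentRunDir()
	if err != nil {
		return "", err
	}
	// Ensure the directory exists; it may be absent in containerized environments.
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return "", fmt.Errorf("creating run dir %q: %w", dir, err)
	}
	return filepath.Join(dir, projectName+".lock"), nil
}
```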
diff --git a/internal/memnet/conn.go b/internal/memnet/conn.go
new file mode 100644
index 00000000000..224bec78830
--- /dev/null
+++ b/internal/memnet/conn.go
@@ -0,0 +1,50 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package memnet
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+)
+
+func DialEndpoint(ctx context.Context, endpoint string) (net.Conn, error) {
+ if addr, ok := strings.CutPrefix(endpoint, "unix://"); ok {
+ return Dial(ctx, "unix", addr)
+ }
+ if addr, ok := strings.CutPrefix(endpoint, "npipe://"); ok {
+ return Dial(ctx, "npipe", addr)
+ }
+ return nil, fmt.Errorf("unsupported protocol for address: %s", endpoint)
+}
+
+func Dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ var d net.Dialer
+ switch network {
+ case "unix":
+ if err := validateSocketPath(addr); err != nil {
+ return nil, err
+ }
+ return d.DialContext(ctx, "unix", addr)
+ case "npipe":
+ // N.B. this will return an error on non-Windows
+ return dialNamedPipe(ctx, addr)
+ default:
+ return nil, fmt.Errorf("unsupported network: %s", network)
+ }
+}
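
A hedged usage sketch for `DialEndpoint` (not included in the change): dialing a local Unix socket endpoint with a timeout. The endpoint value is illustrative, and the snippet only succeeds on platforms where that socket exists.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/compose/v5/internal/memnet"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// "unix://" and "npipe://" are the only schemes DialEndpoint accepts.
	conn, err := memnet.DialEndpoint(ctx, "unix:///var/run/docker.sock")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr().Network())
}
```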
diff --git a/internal/memnet/conn_unix.go b/internal/memnet/conn_unix.go
new file mode 100644
index 00000000000..e151984848a
--- /dev/null
+++ b/internal/memnet/conn_unix.go
@@ -0,0 +1,39 @@
+//go:build !windows
+
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package memnet
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "syscall"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func dialNamedPipe(_ context.Context, _ string) (net.Conn, error) {
+ return nil, fmt.Errorf("named pipes are only available on Windows")
+}
+
+func validateSocketPath(addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("socket address is too long: %s", addr)
+ }
+ return nil
+}
diff --git a/internal/memnet/conn_windows.go b/internal/memnet/conn_windows.go
new file mode 100644
index 00000000000..b7f7d9ea8fa
--- /dev/null
+++ b/internal/memnet/conn_windows.go
@@ -0,0 +1,33 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package memnet
+
+import (
+ "context"
+ "net"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func dialNamedPipe(ctx context.Context, addr string) (net.Conn, error) {
+ return winio.DialPipeContext(ctx, addr)
+}
+
+func validateSocketPath(addr string) error {
+ // AF_UNIX sockets do not have strict path limits on Windows
+ return nil
+}
diff --git a/internal/oci/push.go b/internal/oci/push.go
new file mode 100644
index 00000000000..fe3ca586651
--- /dev/null
+++ b/internal/oci/push.go
@@ -0,0 +1,222 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package oci
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "path/filepath"
+ "slices"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/remotes"
+ pusherrors "github.com/containerd/containerd/v2/core/remotes/errors"
+ "github.com/distribution/reference"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ // ComposeProjectArtifactType is the OCI 1.1-compliant artifact type value
+ // for the generated image manifest.
+ ComposeProjectArtifactType = "application/vnd.docker.compose.project"
+ // ComposeYAMLMediaType is the media type for each layer (Compose file)
+ // in the image manifest.
+ ComposeYAMLMediaType = "application/vnd.docker.compose.file+yaml"
+ // ComposeEmptyConfigMediaType is a media type used for the config descriptor
+ // when doing OCI 1.0-style pushes.
+ //
+ // The content is always `{}`, the same as a normal empty descriptor, but
+ // the specific media type allows clients to fall back to the config media
+ // type to recognize the manifest as a Compose project since the artifact
+ // type field is not available in OCI 1.0.
+ //
+ // This is based on guidance from the OCI 1.1 spec:
+ // > Implementers note: artifacts have historically been created without
+ // > an artifactType field, and tooling to work with artifacts should
+ // > fallback to the config.mediaType value.
+ ComposeEmptyConfigMediaType = "application/vnd.docker.compose.config.empty.v1+json"
+ // ComposeEnvFileMediaType is the media type for each Env File layer in the image manifest.
+ ComposeEnvFileMediaType = "application/vnd.docker.compose.envfile"
+)
+
+// clientAuthStatusCodes are client (4xx) errors that are authentication
+// related.
+var clientAuthStatusCodes = []int{
+ http.StatusUnauthorized,
+ http.StatusForbidden,
+ http.StatusProxyAuthRequired,
+}
+
+func DescriptorForComposeFile(path string, content []byte) v1.Descriptor {
+ return v1.Descriptor{
+ MediaType: ComposeYAMLMediaType,
+ Digest: digest.FromString(string(content)),
+ Size: int64(len(content)),
+ Annotations: map[string]string{
+ "com.docker.compose.version": api.ComposeVersion,
+ "com.docker.compose.file": filepath.Base(path),
+ },
+ Data: content,
+ }
+}
+
+func DescriptorForEnvFile(path string, content []byte) v1.Descriptor {
+ return v1.Descriptor{
+ MediaType: ComposeEnvFileMediaType,
+ Digest: digest.FromString(string(content)),
+ Size: int64(len(content)),
+ Annotations: map[string]string{
+ "com.docker.compose.version": api.ComposeVersion,
+ "com.docker.compose.envfile": filepath.Base(path),
+ },
+ Data: content,
+ }
+}
+
+func PushManifest(ctx context.Context, resolver remotes.Resolver, named reference.Named, layers []v1.Descriptor, ociVersion api.OCIVersion) (v1.Descriptor, error) {
+ // Check if we need an extra empty layer for the manifest config
+ if ociVersion == api.OCIVersion1_1 || ociVersion == "" {
+ err := push(ctx, resolver, named, v1.DescriptorEmptyJSON)
+ if err != nil {
+ return v1.Descriptor{}, err
+ }
+ }
+ // prepare to push the manifest by pushing the layers
+ layerDescriptors := make([]v1.Descriptor, len(layers))
+ for i := range layers {
+ layerDescriptors[i] = layers[i]
+ if err := push(ctx, resolver, named, layers[i]); err != nil {
+ return v1.Descriptor{}, err
+ }
+ }
+
+ if ociVersion != "" {
+ // if a version was explicitly specified, use it
+ return createAndPushManifest(ctx, resolver, named, layerDescriptors, ociVersion)
+ }
+
+ // try to push in the OCI 1.1 format but fallback to OCI 1.0 on 4xx errors
+ // (other than auth) since it's most likely the result of the registry not
+ // having support
+ descriptor, err := createAndPushManifest(ctx, resolver, named, layerDescriptors, api.OCIVersion1_1)
+ var pushErr pusherrors.ErrUnexpectedStatus
+ if errors.As(err, &pushErr) && isNonAuthClientError(pushErr.StatusCode) {
+ // TODO(milas): show a warning here (won't work with logrus)
+ return createAndPushManifest(ctx, resolver, named, layerDescriptors, api.OCIVersion1_0)
+ }
+ return descriptor, err
+}
+
+func push(ctx context.Context, resolver remotes.Resolver, ref reference.Named, descriptor v1.Descriptor) error {
+ fullRef, err := reference.WithDigest(reference.TagNameOnly(ref), descriptor.Digest)
+ if err != nil {
+ return err
+ }
+
+ return Push(ctx, resolver, fullRef, descriptor)
+}
+
+func createAndPushManifest(ctx context.Context, resolver remotes.Resolver, named reference.Named, layers []v1.Descriptor, ociVersion api.OCIVersion) (v1.Descriptor, error) {
+ descriptor, toPush, err := generateManifest(layers, ociVersion)
+ if err != nil {
+ return v1.Descriptor{}, err
+ }
+ for _, p := range toPush {
+ err = push(ctx, resolver, named, p)
+ if err != nil {
+ return v1.Descriptor{}, err
+ }
+ }
+ return descriptor, nil
+}
+
+func isNonAuthClientError(statusCode int) bool {
+ if statusCode < 400 || statusCode >= 500 {
+ // not a client error
+ return false
+ }
+ return !slices.Contains(clientAuthStatusCodes, statusCode)
+}
+
+func generateManifest(layers []v1.Descriptor, ociCompat api.OCIVersion) (v1.Descriptor, []v1.Descriptor, error) {
+ var toPush []v1.Descriptor
+ var config v1.Descriptor
+ var artifactType string
+ switch ociCompat {
+ case api.OCIVersion1_0:
+ // "Content other than OCI container images MAY be packaged using the image manifest.
+ // When this is done, the config.mediaType value MUST be set to a value specific to
+ // the artifact type or the empty value."
+ // Source: https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage
+ //
+ // The `ComposeEmptyConfigMediaType` is used specifically for this purpose:
+ // there is no config, and an empty descriptor is used for OCI 1.1 in
+ // conjunction with the `ArtifactType`, but for OCI 1.0 compatibility,
+ // tooling falls back to the config media type, so this is used to
+ // indicate that it's not a container image but custom content.
+ configData := []byte("{}")
+ config = v1.Descriptor{
+ MediaType: ComposeEmptyConfigMediaType,
+ Digest: digest.FromBytes(configData),
+ Size: int64(len(configData)),
+ Data: configData,
+ }
+ // N.B. OCI 1.0 does NOT support specifying the artifact type, so it's
+ // left as an empty string to omit it from the marshaled JSON
+ artifactType = ""
+ toPush = append(toPush, config)
+ case api.OCIVersion1_1:
+ config = v1.DescriptorEmptyJSON
+ artifactType = ComposeProjectArtifactType
+ toPush = append(toPush, config)
+ default:
+ return v1.Descriptor{}, nil, fmt.Errorf("unsupported OCI version: %s", ociCompat)
+ }
+
+ manifest, err := json.Marshal(v1.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: v1.MediaTypeImageManifest,
+ ArtifactType: artifactType,
+ Config: config,
+ Layers: layers,
+ Annotations: map[string]string{
+ "org.opencontainers.image.created": time.Now().Format(time.RFC3339),
+ },
+ })
+ if err != nil {
+ return v1.Descriptor{}, nil, err
+ }
+
+ manifestDescriptor := v1.Descriptor{
+ MediaType: v1.MediaTypeImageManifest,
+ Digest: digest.FromString(string(manifest)),
+ Size: int64(len(manifest)),
+ Annotations: map[string]string{
+ "com.docker.compose.version": api.ComposeVersion,
+ },
+ ArtifactType: artifactType,
+ Data: manifest,
+ }
+ toPush = append(toPush, manifestDescriptor)
+ return manifestDescriptor, toPush, nil
+}
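
To make the flow concrete, here is a hedged sketch of pushing a single Compose file as an OCI artifact using the helpers above. The registry reference is a placeholder, error handling is minimal, and the resolver comes from `NewResolver` in `internal/oci/resolver.go`, added later in this change.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/distribution/reference"
	"github.com/docker/cli/cli/config"
	"github.com/docker/compose/v5/internal/oci"
	"github.com/docker/compose/v5/pkg/api"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()

	content, err := os.ReadFile("compose.yaml")
	if err != nil {
		panic(err)
	}
	// One layer per Compose file; env files would use DescriptorForEnvFile.
	layers := []v1.Descriptor{oci.DescriptorForComposeFile("compose.yaml", content)}

	named, err := reference.ParseDockerRef("registry.example.com/acme/app:1.0")
	if err != nil {
		panic(err)
	}

	resolver := oci.NewResolver(config.LoadDefaultConfigFile(os.Stderr))
	// Pin to OCI 1.1 here; passing an empty version instead lets PushManifest
	// try OCI 1.1 and fall back to OCI 1.0 on non-auth 4xx errors.
	desc, err := oci.PushManifest(ctx, resolver, named, layers, api.OCIVersion1_1)
	if err != nil {
		panic(err)
	}
	fmt.Println("pushed manifest:", desc.Digest)
}
```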
diff --git a/internal/oci/resolver.go b/internal/oci/resolver.go
new file mode 100644
index 00000000000..6277ab61bea
--- /dev/null
+++ b/internal/oci/resolver.go
@@ -0,0 +1,142 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package oci
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "slices"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/containerd/containerd/v2/core/remotes/docker"
+ "github.com/containerd/containerd/v2/pkg/labels"
+ "github.com/containerd/errdefs"
+ "github.com/distribution/reference"
+ "github.com/docker/cli/cli/config/configfile"
+ "github.com/docker/compose/v5/internal/registry"
+ "github.com/moby/buildkit/util/contentutil"
+ spec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// NewResolver sets up an OCI resolver based on docker/cli config to provide registry credentials
+func NewResolver(config *configfile.ConfigFile, insecureRegistries ...string) remotes.Resolver {
+ return docker.NewResolver(docker.ResolverOptions{
+ Hosts: docker.ConfigureDefaultRegistries(
+ docker.WithAuthorizer(docker.NewDockerAuthorizer(
+ docker.WithAuthCreds(func(host string) (string, string, error) {
+ host = registry.GetAuthConfigKey(host)
+ auth, err := config.GetAuthConfig(host)
+ if err != nil {
+ return "", "", err
+ }
+ if auth.IdentityToken != "" {
+ return "", auth.IdentityToken, nil
+ }
+ return auth.Username, auth.Password, nil
+ }),
+ )),
+ docker.WithPlainHTTP(func(domain string) (bool, error) {
+ // Should be used for testing **only**
+ return slices.Contains(insecureRegistries, domain), nil
+ }),
+ ),
+ })
+}
+
+// Get resolves a named OCI resource and returns its OCI descriptor together with the raw manifest content
+func Get(ctx context.Context, resolver remotes.Resolver, ref reference.Named) (spec.Descriptor, []byte, error) {
+ _, descriptor, err := resolver.Resolve(ctx, ref.String())
+ if err != nil {
+ return spec.Descriptor{}, nil, err
+ }
+
+ fetcher, err := resolver.Fetcher(ctx, ref.String())
+ if err != nil {
+ return spec.Descriptor{}, nil, err
+ }
+ fetch, err := fetcher.Fetch(ctx, descriptor)
+ if err != nil {
+ return spec.Descriptor{}, nil, err
+ }
+ content, err := io.ReadAll(fetch)
+ if err != nil {
+ return spec.Descriptor{}, nil, err
+ }
+ return descriptor, content, nil
+}
+
+func Copy(ctx context.Context, resolver remotes.Resolver, image reference.Named, named reference.Named) (spec.Descriptor, error) {
+ src, desc, err := resolver.Resolve(ctx, image.String())
+ if err != nil {
+ return spec.Descriptor{}, err
+ }
+ if desc.Annotations == nil {
+ desc.Annotations = make(map[string]string)
+ }
+ // set LabelDistributionSource so push will actually use a registry mount
+ refspec := reference.TrimNamed(image).String()
+ u, err := url.Parse("dummy://" + refspec)
+ if err != nil {
+ return spec.Descriptor{}, err
+ }
+ source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+ desc.Annotations[labels.LabelDistributionSource+"."+source] = repo
+
+ p, err := resolver.Pusher(ctx, named.Name())
+ if err != nil {
+ return spec.Descriptor{}, err
+ }
+ f, err := resolver.Fetcher(ctx, src)
+ if err != nil {
+ return spec.Descriptor{}, err
+ }
+
+ err = contentutil.CopyChain(ctx,
+ contentutil.FromPusher(p),
+ contentutil.FromFetcher(f), desc)
+ return desc, err
+}
+
+func Push(ctx context.Context, resolver remotes.Resolver, ref reference.Named, descriptor spec.Descriptor) error {
+ pusher, err := resolver.Pusher(ctx, ref.String())
+ if err != nil {
+ return err
+ }
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, ComposeYAMLMediaType, "artifact-")
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, ComposeEnvFileMediaType, "artifact-")
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, ComposeEmptyConfigMediaType, "config-")
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, spec.MediaTypeEmptyJSON, "config-")
+
+ push, err := pusher.Push(ctx, descriptor)
+ if errdefs.IsAlreadyExists(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ _, err = push.Write(descriptor.Data)
+ if err != nil {
+ // Close the writer on error since Commit won't be called
+ _ = push.Close()
+ return err
+ }
+ // Commit will close the writer
+ return push.Commit(ctx, int64(len(descriptor.Data)), descriptor.Digest)
+}
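
A complementary sketch (not part of the diff) using `Get` to fetch a previously published artifact manifest. The reference is again a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/distribution/reference"
	"github.com/docker/cli/cli/config"
	"github.com/docker/compose/v5/internal/oci"
)

func main() {
	ctx := context.Background()
	resolver := oci.NewResolver(config.LoadDefaultConfigFile(os.Stderr))

	ref, err := reference.ParseDockerRef("registry.example.com/acme/app:1.0")
	if err != nil {
		panic(err)
	}

	// Get resolves the reference and returns the descriptor plus the raw manifest bytes.
	desc, manifest, err := oci.Get(ctx, resolver, ref)
	if err != nil {
		panic(err)
	}
	fmt.Printf("artifact type: %s\ndigest: %s\n%s\n", desc.ArtifactType, desc.Digest, manifest)
}
```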
diff --git a/internal/paths/paths.go b/internal/paths/paths.go
new file mode 100644
index 00000000000..4e4c01b8cc4
--- /dev/null
+++ b/internal/paths/paths.go
@@ -0,0 +1,120 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package paths
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+func IsChild(dir string, file string) bool {
+ if dir == "" {
+ return false
+ }
+
+ dir = filepath.Clean(dir)
+ current := filepath.Clean(file)
+ child := "."
+ for {
+ if strings.EqualFold(dir, current) {
+ // If the two paths are exactly equal, then they must be the same.
+ if dir == current {
+ return true
+ }
+
+ // If the two paths are equal under case-folding, but not exactly equal,
+ // then the only way to check if they're truly "equal" is to check
+ // to see if we're on a case-insensitive file system.
+ //
+ // This is a notoriously tricky problem. See how dep solves it here:
+ // https://github.com/golang/dep/blob/v0.5.4/internal/fs/fs.go#L33
+ //
+ // because you can mount case-sensitive filesystems onto case-insensitive
+ // file-systems, and vice versa :scream:
+ //
+ // We want to do as much of this check as possible with strings-only
+ // (to avoid a file system read and error handling), so we only
+ // do this check if we have no other choice.
+ dirInfo, err := os.Stat(dir)
+ if err != nil {
+ return false
+ }
+
+ currentInfo, err := os.Stat(current)
+ if err != nil {
+ return false
+ }
+
+ if !os.SameFile(dirInfo, currentInfo) {
+ return false
+ }
+ return true
+ }
+
+ if len(current) <= len(dir) || current == "." {
+ return false
+ }
+
+ cDir := filepath.Dir(current)
+ cBase := filepath.Base(current)
+ child = filepath.Join(cBase, child)
+ current = cDir
+ }
+}
+
+// EncompassingPaths returns the minimal set of paths that root all paths
+// from the original collection.
+//
+// For example, ["/foo", "/foo/bar", "/foo", "/baz"] -> ["/foo", "/baz"].
+func EncompassingPaths(paths []string) []string {
+ result := []string{}
+ for _, current := range paths {
+ isCovered := false
+ hasRemovals := false
+
+ for i, existing := range result {
+ if IsChild(existing, current) {
+ // The path is already covered, so there's no need to include it
+ isCovered = true
+ break
+ }
+
+ if IsChild(current, existing) {
+ // Mark the element empty for removal.
+ result[i] = ""
+ hasRemovals = true
+ }
+ }
+
+ if !isCovered {
+ result = append(result, current)
+ }
+
+ if hasRemovals {
+ // Remove all the empties
+ newResult := []string{}
+ for _, r := range result {
+ if r != "" {
+ newResult = append(newResult, r)
+ }
+ }
+ result = newResult
+ }
+ }
+ return result
+}
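
The semantics of `IsChild` and `EncompassingPaths` are easiest to see with a few literal inputs. A small sketch with illustrative, Unix-style values:

```go
package main

import (
	"fmt"

	"github.com/docker/compose/v5/internal/paths"
)

func main() {
	// A path roots its descendants (and itself)...
	fmt.Println(paths.IsChild("/foo", "/foo/bar/baz")) // true
	// ...but sibling paths that merely share a prefix are not children.
	fmt.Println(paths.IsChild("/foo", "/foobar")) // false

	// EncompassingPaths drops entries already rooted by another entry.
	fmt.Println(paths.EncompassingPaths([]string{"/foo", "/foo/bar", "/foo", "/baz"}))
	// -> [/foo /baz]
}
```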
diff --git a/internal/registry/registry.go b/internal/registry/registry.go
new file mode 100644
index 00000000000..0ee73883070
--- /dev/null
+++ b/internal/registry/registry.go
@@ -0,0 +1,44 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package registry
+
+const (
+ // DefaultNamespace is the default namespace
+ DefaultNamespace = "docker.io"
+ // DefaultRegistryHost is the hostname for the default (Docker Hub) registry
+ // used for pushing and pulling images. This hostname is hard-coded to handle
+ // the conversion from image references without registry name (e.g. "ubuntu",
+ // or "ubuntu:latest"), as well as references using the "docker.io" domain
+ // name, which is used as canonical reference for images on Docker Hub, but
+ // does not match the domain-name of Docker Hub's registry.
+ DefaultRegistryHost = "registry-1.docker.io"
+ // IndexHostname is the index hostname, used for authentication and image search.
+ IndexHostname = "index.docker.io"
+ // IndexServer is used for user auth and image search
+ IndexServer = "https://" + IndexHostname + "/v1/"
+ // IndexName is the name of the index
+ IndexName = "docker.io"
+)
+
+// GetAuthConfigKey special-cases using the full index address of the official
+// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
+func GetAuthConfigKey(indexName string) string {
+ if indexName == IndexName || indexName == IndexHostname || indexName == DefaultRegistryHost {
+ return IndexServer
+ }
+ return indexName
+}
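
A short sketch (not in the change) of what `GetAuthConfigKey` returns for the Docker Hub aliases versus a private registry; the private hostname is illustrative.

```go
package main

import (
	"fmt"

	"github.com/docker/compose/v5/internal/registry"
)

func main() {
	// All Docker Hub aliases resolve to the legacy index server URL...
	fmt.Println(registry.GetAuthConfigKey("docker.io"))            // https://index.docker.io/v1/
	fmt.Println(registry.GetAuthConfigKey("registry-1.docker.io")) // https://index.docker.io/v1/

	// ...while private registries keep their (host)name[:port] as the key.
	fmt.Println(registry.GetAuthConfigKey("registry.example.com:5000")) // registry.example.com:5000
}
```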
diff --git a/internal/sync/shared.go b/internal/sync/shared.go
new file mode 100644
index 00000000000..4fd9df37719
--- /dev/null
+++ b/internal/sync/shared.go
@@ -0,0 +1,40 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sync
+
+import (
+ "context"
+)
+
+// PathMapping contains the Compose service and modified host system path.
+type PathMapping struct {
+ // HostPath that was created/modified/deleted outside the container.
+ //
+ // This is the path as seen from the user's perspective, e.g.
+ // - C:\Users\moby\Documents\hello-world\main.go (file on Windows)
+ // - /Users/moby/Documents/hello-world (directory on macOS)
+ HostPath string
+ // ContainerPath for the target file inside the container (only populated
+ // for sync events, not rebuild).
+ //
+ // This is the path as used in Docker CLI commands, e.g.
+ // - /workdir/main.go
+ // - /workdir/subdir
+ ContainerPath string
+}
+
+type Syncer interface {
+ Sync(ctx context.Context, service string, paths []*PathMapping) error
+}
diff --git a/internal/sync/tar.go b/internal/sync/tar.go
new file mode 100644
index 00000000000..4250b6afc4a
--- /dev/null
+++ b/internal/sync/tar.go
@@ -0,0 +1,352 @@
+/*
+ Copyright 2018 The Tilt Dev Authors
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sync
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/moby/go-archive"
+ "golang.org/x/sync/errgroup"
+)
+
+type archiveEntry struct {
+ path string
+ info os.FileInfo
+ header *tar.Header
+}
+
+type LowLevelClient interface {
+ ContainersForService(ctx context.Context, projectName string, serviceName string) ([]container.Summary, error)
+
+ Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error
+ Untar(ctx context.Context, id string, reader io.ReadCloser) error
+}
+
+type Tar struct {
+ client LowLevelClient
+
+ projectName string
+}
+
+var _ Syncer = &Tar{}
+
+func NewTar(projectName string, client LowLevelClient) *Tar {
+ return &Tar{
+ projectName: projectName,
+ client: client,
+ }
+}
+
+func (t *Tar) Sync(ctx context.Context, service string, paths []*PathMapping) error {
+ containers, err := t.client.ContainersForService(ctx, t.projectName, service)
+ if err != nil {
+ return err
+ }
+
+ var pathsToCopy []PathMapping
+ var pathsToDelete []string
+ for _, p := range paths {
+ if _, err := os.Stat(p.HostPath); err != nil && errors.Is(err, fs.ErrNotExist) {
+ pathsToDelete = append(pathsToDelete, p.ContainerPath)
+ } else {
+ pathsToCopy = append(pathsToCopy, *p)
+ }
+ }
+
+ var deleteCmd []string
+ if len(pathsToDelete) != 0 {
+ deleteCmd = append([]string{"rm", "-rf"}, pathsToDelete...)
+ }
+
+ var (
+ eg errgroup.Group
+ errMu sync.Mutex
+ errs = make([]error, 0, len(containers)*2) // max 2 errs per container
+ )
+
+	eg.SetLimit(16) // arbitrary bound on concurrent per-container copies
+ for i := range containers {
+ containerID := containers[i].ID
+ tarReader := tarArchive(pathsToCopy)
+
+ eg.Go(func() error {
+ if len(deleteCmd) != 0 {
+ if err := t.client.Exec(ctx, containerID, deleteCmd, nil); err != nil {
+ errMu.Lock()
+ errs = append(errs, fmt.Errorf("deleting paths in %s: %w", containerID, err))
+ errMu.Unlock()
+ }
+ }
+
+ if err := t.client.Untar(ctx, containerID, tarReader); err != nil {
+ errMu.Lock()
+ errs = append(errs, fmt.Errorf("copying files to %s: %w", containerID, err))
+ errMu.Unlock()
+ }
+ return nil // don't fail-fast; collect all errors
+ })
+ }
+
+ _ = eg.Wait()
+ return errors.Join(errs...)
+}
+
+type ArchiveBuilder struct {
+ tw *tar.Writer
+ // A shared I/O buffer to help with file copying.
+ copyBuf *bytes.Buffer
+}
+
+func NewArchiveBuilder(writer io.Writer) *ArchiveBuilder {
+ tw := tar.NewWriter(writer)
+ return &ArchiveBuilder{
+ tw: tw,
+ copyBuf: &bytes.Buffer{},
+ }
+}
+
+func (a *ArchiveBuilder) Close() error {
+ return a.tw.Close()
+}
+
+// ArchivePathsIfExist creates a tar archive of all local files in `paths`. It quietly skips any paths that don't exist.
+func (a *ArchiveBuilder) ArchivePathsIfExist(paths []PathMapping) error {
+ // In order to handle overlapping syncs, we
+ // 1) collect all the entries,
+ // 2) de-dupe them, with last-one-wins semantics
+ // 3) write all the entries
+ //
+ // It's not obvious that this is the correct behavior. A better approach
+ // (that's more in-line with how syncs work) might ignore files in earlier
+ // path mappings when we know they're going to be "synced" over.
+ // There's a bunch of subtle product decisions about how overlapping path
+ // mappings work that we're not sure about.
+ var entries []archiveEntry
+ for _, p := range paths {
+ newEntries, err := a.entriesForPath(p.HostPath, p.ContainerPath)
+ if err != nil {
+ return fmt.Errorf("inspecting %q: %w", p.HostPath, err)
+ }
+
+ entries = append(entries, newEntries...)
+ }
+
+ entries = dedupeEntries(entries)
+ for _, entry := range entries {
+ err := a.writeEntry(entry)
+ if err != nil {
+ return fmt.Errorf("archiving %q: %w", entry.path, err)
+ }
+ }
+ return nil
+}
+
+func (a *ArchiveBuilder) writeEntry(entry archiveEntry) error {
+ pathInTar := entry.path
+ header := entry.header
+
+ if header.Typeflag != tar.TypeReg {
+ // anything other than a regular file (e.g. dir, symlink) just needs the header
+ if err := a.tw.WriteHeader(header); err != nil {
+ return fmt.Errorf("writing %q header: %w", pathInTar, err)
+ }
+ return nil
+ }
+
+ file, err := os.Open(pathInTar)
+ if err != nil {
+ // In case the file has been deleted since we last looked at it.
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ defer func() {
+ _ = file.Close()
+ }()
+
+ // The size header must match the number of contents bytes.
+ //
+ // There is room for a race condition here if something writes to the file
+ // after we've read the file size.
+ //
+ // For small files, we avoid this by first copying the file into a buffer,
+ // and using the size of the buffer to populate the header.
+ //
+ // For larger files, we don't want to copy the whole thing into a buffer,
+ // because that would blow up heap size. There is some danger that this
+ // will lead to a spurious error when the tar writer validates the sizes.
+ // That error will be disruptive but will be handled as best as we
+ // can downstream.
+ useBuf := header.Size < 5000000
+ if useBuf {
+ a.copyBuf.Reset()
+ _, err = io.Copy(a.copyBuf, file)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return fmt.Errorf("copying %q: %w", pathInTar, err)
+ }
+ header.Size = int64(len(a.copyBuf.Bytes()))
+ }
+
+ // wait to write the header until _after_ the file is successfully opened
+ // to avoid generating an invalid tar entry that has a header but no contents
+ // in the case the file has been deleted
+ err = a.tw.WriteHeader(header)
+ if err != nil {
+ return fmt.Errorf("writing %q header: %w", pathInTar, err)
+ }
+
+ if useBuf {
+ _, err = io.Copy(a.tw, a.copyBuf)
+ } else {
+ _, err = io.Copy(a.tw, file)
+ }
+
+ if err != nil && !errors.Is(err, io.EOF) {
+ return fmt.Errorf("copying %q: %w", pathInTar, err)
+ }
+
+ // explicitly flush so that if the entry is invalid we will detect it now and
+ // provide a more meaningful error
+ if err := a.tw.Flush(); err != nil {
+ return fmt.Errorf("finalizing %q: %w", pathInTar, err)
+ }
+ return nil
+}
+
+// entriesForPath returns the archive entries for the given local path, mapped to the given
+// container destination (recursively for directories).
+// e.g. tarring my_dir --> dest d: d/file_a, d/file_b
+// If the source path does not exist, it is quietly skipped and no error is returned.
+func (a *ArchiveBuilder) entriesForPath(localPath, containerPath string) ([]archiveEntry, error) {
+ localInfo, err := os.Stat(localPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ localPathIsDir := localInfo.IsDir()
+ if localPathIsDir {
+ // Make sure we can trim this off filenames to get valid relative filepaths
+ if !strings.HasSuffix(localPath, string(filepath.Separator)) {
+ localPath += string(filepath.Separator)
+ }
+ }
+
+ containerPath = strings.TrimPrefix(containerPath, "/")
+
+ result := make([]archiveEntry, 0)
+ err = filepath.Walk(localPath, func(curLocalPath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return fmt.Errorf("walking %q: %w", curLocalPath, err)
+ }
+
+ linkname := ""
+ if info.Mode()&os.ModeSymlink != 0 {
+ var err error
+ linkname, err = os.Readlink(curLocalPath)
+ if err != nil {
+ return err
+ }
+ }
+
+ var name string
+ //nolint:gocritic
+ if localPathIsDir {
+ // Name of file in tar should be relative to source directory...
+ tmp, err := filepath.Rel(localPath, curLocalPath)
+ if err != nil {
+ return fmt.Errorf("making %q relative to %q: %w", curLocalPath, localPath, err)
+ }
+ // ...and live inside `dest`
+ name = path.Join(containerPath, filepath.ToSlash(tmp))
+ } else if strings.HasSuffix(containerPath, "/") {
+ name = containerPath + filepath.Base(curLocalPath)
+ } else {
+ name = containerPath
+ }
+
+ header, err := archive.FileInfoHeader(name, info, linkname)
+ if err != nil {
+ // Not all types of files are allowed in a tarball. That's OK.
+ // Mimic the Docker behavior and just skip the file.
+ return nil
+ }
+
+ result = append(result, archiveEntry{
+ path: curLocalPath,
+ info: info,
+ header: header,
+ })
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func tarArchive(ops []PathMapping) io.ReadCloser {
+ pr, pw := io.Pipe()
+ go func() {
+ ab := NewArchiveBuilder(pw)
+ err := ab.ArchivePathsIfExist(ops)
+ if err != nil {
+ _ = pw.CloseWithError(fmt.Errorf("adding files to tar: %w", err))
+ } else {
+ // propagate errors from the TarWriter::Close() because it performs a final
+ // Flush() and any errors mean the tar is invalid
+ if err := ab.Close(); err != nil {
+ _ = pw.CloseWithError(fmt.Errorf("closing tar: %w", err))
+ } else {
+ _ = pw.Close()
+ }
+ }
+ }()
+ return pr
+}
+
+// Dedupe the entries with last-entry-wins semantics.
+func dedupeEntries(entries []archiveEntry) []archiveEntry {
+ seenIndex := make(map[string]int, len(entries))
+ result := make([]archiveEntry, 0, len(entries))
+ for i, entry := range entries {
+ seenIndex[entry.header.Name] = i
+ }
+ for i, entry := range entries {
+ if seenIndex[entry.header.Name] == i {
+ result = append(result, entry)
+ }
+ }
+ return result
+}
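
Outside the diff, a hedged sketch of driving `ArchiveBuilder` directly: archive a local directory under a container path and list the resulting tar entries. The `./app` and `/workdir` values are placeholders.

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/docker/compose/v5/internal/sync"
)

func main() {
	var buf bytes.Buffer
	ab := sync.NewArchiveBuilder(&buf)

	// Archive a local directory so its contents land under /workdir in the container.
	err := ab.ArchivePathsIfExist([]sync.PathMapping{
		{HostPath: "./app", ContainerPath: "/workdir"},
	})
	if err != nil {
		panic(err)
	}
	if err := ab.Close(); err != nil {
		panic(err)
	}

	// List the entries of the resulting tar stream.
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}
```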
diff --git a/internal/tracing/attributes.go b/internal/tracing/attributes.go
new file mode 100644
index 00000000000..2c8779bc86c
--- /dev/null
+++ b/internal/tracing/attributes.go
@@ -0,0 +1,196 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/docker/api/types/container"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SpanOptions is a small helper type to make it easy to share the options helpers between
+// downstream functions that accept slices of trace.SpanStartOption and trace.EventOption.
+type SpanOptions []trace.SpanStartEventOption
+
+type MetricsKey struct{}
+
+type Metrics struct {
+ CountExtends int
+ CountIncludesLocal int
+ CountIncludesRemote int
+}
+
+func (s SpanOptions) SpanStartOptions() []trace.SpanStartOption {
+ out := make([]trace.SpanStartOption, len(s))
+ for i := range s {
+ out[i] = s[i]
+ }
+ return out
+}
+
+func (s SpanOptions) EventOptions() []trace.EventOption {
+ out := make([]trace.EventOption, len(s))
+ for i := range s {
+ out[i] = s[i]
+ }
+ return out
+}
+
+// ProjectOptions returns common attributes from a Compose project.
+//
+// For convenience, it's returned as a SpanOptions object to allow it to be
+// passed directly to the wrapping helper methods in this package such as
+// SpanWrapFunc.
+func ProjectOptions(ctx context.Context, proj *types.Project) SpanOptions {
+ if proj == nil {
+ return nil
+ }
+ capabilities, gpu, tpu := proj.ServicesWithCapabilities()
+ attrs := []attribute.KeyValue{
+ attribute.String("project.name", proj.Name),
+ attribute.String("project.dir", proj.WorkingDir),
+ attribute.StringSlice("project.compose_files", proj.ComposeFiles),
+ attribute.StringSlice("project.profiles", proj.Profiles),
+ attribute.StringSlice("project.volumes", proj.VolumeNames()),
+ attribute.StringSlice("project.networks", proj.NetworkNames()),
+ attribute.StringSlice("project.secrets", proj.SecretNames()),
+ attribute.StringSlice("project.configs", proj.ConfigNames()),
+ attribute.StringSlice("project.models", proj.ModelNames()),
+ attribute.StringSlice("project.extensions", keys(proj.Extensions)),
+ attribute.StringSlice("project.services.active", proj.ServiceNames()),
+ attribute.StringSlice("project.services.disabled", proj.DisabledServiceNames()),
+ attribute.StringSlice("project.services.build", proj.ServicesWithBuild()),
+ attribute.StringSlice("project.services.depends_on", proj.ServicesWithDependsOn()),
+ attribute.StringSlice("project.services.models", proj.ServicesWithModels()),
+ attribute.StringSlice("project.services.capabilities", capabilities),
+ attribute.StringSlice("project.services.capabilities.gpu", gpu),
+ attribute.StringSlice("project.services.capabilities.tpu", tpu),
+ }
+ if metrics, ok := ctx.Value(MetricsKey{}).(Metrics); ok {
+ attrs = append(attrs, attribute.Int("project.services.extends", metrics.CountExtends))
+ attrs = append(attrs, attribute.Int("project.includes.local", metrics.CountIncludesLocal))
+ attrs = append(attrs, attribute.Int("project.includes.remote", metrics.CountIncludesRemote))
+ }
+
+ if projHash, ok := projectHash(proj); ok {
+ attrs = append(attrs, attribute.String("project.hash", projHash))
+ }
+ return []trace.SpanStartEventOption{
+ trace.WithAttributes(attrs...),
+ }
+}
+
+// ServiceOptions returns common attributes from a Compose service.
+//
+// For convenience, it's returned as a SpanOptions object to allow it to be
+// passed directly to the wrapping helper methods in this package such as
+// SpanWrapFunc.
+func ServiceOptions(service types.ServiceConfig) SpanOptions {
+ attrs := []attribute.KeyValue{
+ attribute.String("service.name", service.Name),
+ attribute.String("service.image", service.Image),
+ attribute.StringSlice("service.networks", keys(service.Networks)),
+ attribute.StringSlice("service.models", keys(service.Models)),
+ }
+
+ configNames := make([]string, len(service.Configs))
+ for i := range service.Configs {
+ configNames[i] = service.Configs[i].Source
+ }
+ attrs = append(attrs, attribute.StringSlice("service.configs", configNames))
+
+ secretNames := make([]string, len(service.Secrets))
+ for i := range service.Secrets {
+ secretNames[i] = service.Secrets[i].Source
+ }
+ attrs = append(attrs, attribute.StringSlice("service.secrets", secretNames))
+
+ volNames := make([]string, len(service.Volumes))
+ for i := range service.Volumes {
+ volNames[i] = service.Volumes[i].Source
+ }
+ attrs = append(attrs, attribute.StringSlice("service.volumes", volNames))
+
+ return []trace.SpanStartEventOption{
+ trace.WithAttributes(attrs...),
+ }
+}
+
+// ContainerOptions returns common attributes from a Moby container.
+//
+// For convenience, it's returned as a SpanOptions object to allow it to be
+// passed directly to the wrapping helper methods in this package such as
+// SpanWrapFunc.
+func ContainerOptions(ctr container.Summary) SpanOptions {
+ attrs := []attribute.KeyValue{
+ attribute.String("container.id", ctr.ID),
+ attribute.String("container.image", ctr.Image),
+ unixTimeAttr("container.created_at", ctr.Created),
+ }
+
+ if len(ctr.Names) != 0 {
+ attrs = append(attrs, attribute.String("container.name", strings.TrimPrefix(ctr.Names[0], "/")))
+ }
+
+ return []trace.SpanStartEventOption{
+ trace.WithAttributes(attrs...),
+ }
+}
+
+func keys[T any](m map[string]T) []string {
+ out := make([]string, 0, len(m))
+ for k := range m {
+ out = append(out, k)
+ }
+ return out
+}
+
+func timeAttr(key string, value time.Time) attribute.KeyValue {
+ return attribute.String(key, value.Format(time.RFC3339))
+}
+
+func unixTimeAttr(key string, value int64) attribute.KeyValue {
+ return timeAttr(key, time.Unix(value, 0).UTC())
+}
+
+// projectHash returns a checksum from the JSON encoding of the project.
+func projectHash(p *types.Project) (string, bool) {
+ if p == nil {
+ return "", false
+ }
+ // disabled services aren't included in the output, so make a copy with
+ // all the services active for hashing
+ var err error
+ p, err = p.WithServicesEnabled(append(p.ServiceNames(), p.DisabledServiceNames()...)...)
+ if err != nil {
+ return "", false
+ }
+ projData, err := json.Marshal(p)
+ if err != nil {
+ return "", false
+ }
+ d := sha256.Sum256(projData)
+ return fmt.Sprintf("%x", d), true
+}
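
A hedged sketch of how `ProjectOptions` is meant to be combined with the span helpers in this package (the `SpanWrapFunc` wrapper appears in `internal/tracing/wrap.go` later in this change). The span name and `deployProject` function are illustrative.

```go
package main

import (
	"context"

	"github.com/compose-spec/compose-go/v2/types"
	"github.com/docker/compose/v5/internal/tracing"
)

func deployProject(ctx context.Context, proj *types.Project) error {
	// The wrapped function runs inside a span carrying the project-level
	// attributes collected by ProjectOptions.
	return tracing.SpanWrapFunc("project/deploy", tracing.ProjectOptions(ctx, proj),
		func(ctx context.Context) error {
			// ... actual work, using the span-aware context ...
			return nil
		})(ctx)
}

func main() {
	// ProjectOptions tolerates a nil project, so this runs as-is.
	_ = deployProject(context.Background(), nil)
}
```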
diff --git a/internal/tracing/attributes_test.go b/internal/tracing/attributes_test.go
new file mode 100644
index 00000000000..d4277a940ab
--- /dev/null
+++ b/internal/tracing/attributes_test.go
@@ -0,0 +1,67 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/stretchr/testify/require"
+)
+
+func TestProjectHash(t *testing.T) {
+ projA := &types.Project{
+ Name: "fake-proj",
+ WorkingDir: "/tmp",
+ Services: map[string]types.ServiceConfig{
+ "foo": {Image: "fake-image"},
+ },
+ DisabledServices: map[string]types.ServiceConfig{
+ "bar": {Image: "diff-image"},
+ },
+ }
+ projB := &types.Project{
+ Name: "fake-proj",
+ WorkingDir: "/tmp",
+ Services: map[string]types.ServiceConfig{
+ "foo": {Image: "fake-image"},
+ "bar": {Image: "diff-image"},
+ },
+ }
+ projC := &types.Project{
+ Name: "fake-proj",
+ WorkingDir: "/tmp",
+ Services: map[string]types.ServiceConfig{
+ "foo": {Image: "fake-image"},
+ "bar": {Image: "diff-image"},
+ "baz": {Image: "yet-another-image"},
+ },
+ }
+
+ hashA, ok := projectHash(projA)
+ require.True(t, ok)
+ require.NotEmpty(t, hashA)
+ hashB, ok := projectHash(projB)
+ require.True(t, ok)
+ require.NotEmpty(t, hashB)
+ require.Equal(t, hashA, hashB)
+
+ hashC, ok := projectHash(projC)
+ require.True(t, ok)
+ require.NotEmpty(t, hashC)
+ require.NotEqual(t, hashC, hashA)
+}
diff --git a/internal/tracing/docker_context.go b/internal/tracing/docker_context.go
new file mode 100644
index 00000000000..75f988d19ab
--- /dev/null
+++ b/internal/tracing/docker_context.go
@@ -0,0 +1,122 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/context/store"
+ "github.com/docker/compose/v5/internal/memnet"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+const otelConfigFieldName = "otel"
+
+// traceClientFromDockerContext creates a gRPC OTLP client based on metadata
+// from the active Docker CLI context.
+func traceClientFromDockerContext(dockerCli command.Cli, otelEnv envMap) (otlptrace.Client, error) {
+ // attempt to extract an OTEL config from the Docker context to enable
+	// automatic integration with Docker Desktop
+ cfg, err := ConfigFromDockerContext(dockerCli.ContextStore(), dockerCli.CurrentContext())
+ if err != nil {
+ return nil, fmt.Errorf("loading otel config from docker context metadata: %w", err)
+ }
+
+ if cfg.Endpoint == "" {
+ return nil, nil
+ }
+
+ // HACK: unfortunately _all_ public OTEL initialization functions
+ // implicitly read from the OS env, so temporarily unset them all and
+ // restore afterwards
+ defer func() {
+ for k, v := range otelEnv {
+ if err := os.Setenv(k, v); err != nil {
+ panic(fmt.Errorf("restoring env for %q: %w", k, err))
+ }
+ }
+ }()
+ for k := range otelEnv {
+ if err := os.Unsetenv(k); err != nil {
+ return nil, fmt.Errorf("stashing env for %q: %w", k, err)
+ }
+ }
+
+ conn, err := grpc.NewClient(cfg.Endpoint,
+ grpc.WithContextDialer(memnet.DialEndpoint),
+ // this dial is restricted to using a local Unix socket / named pipe,
+ // so there is no need for TLS
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("initializing otel connection from docker context metadata: %w", err)
+ }
+
+ client := otlptracegrpc.NewClient(otlptracegrpc.WithGRPCConn(conn))
+ return client, nil
+}
+
+// ConfigFromDockerContext inspects extra metadata included as part of the
+// specified Docker context to try and extract a valid OTLP client configuration.
+func ConfigFromDockerContext(st store.Store, name string) (OTLPConfig, error) {
+ meta, err := st.GetMetadata(name)
+ if err != nil {
+ return OTLPConfig{}, err
+ }
+
+ var otelCfg interface{}
+ switch m := meta.Metadata.(type) {
+ case command.DockerContext:
+ otelCfg = m.AdditionalFields[otelConfigFieldName]
+ case map[string]interface{}:
+ otelCfg = m[otelConfigFieldName]
+ }
+ if otelCfg == nil {
+ return OTLPConfig{}, nil
+ }
+
+ otelMap, ok := otelCfg.(map[string]interface{})
+ if !ok {
+ return OTLPConfig{}, fmt.Errorf(
+ "unexpected type for field %q: %T (expected: %T)",
+ otelConfigFieldName,
+ otelCfg,
+ otelMap,
+ )
+ }
+
+ // keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
+ cfg := OTLPConfig{
+ Endpoint: valueOrDefault[string](otelMap, "OTEL_EXPORTER_OTLP_ENDPOINT"),
+ }
+ return cfg, nil
+}
+
+// valueOrDefault returns the type-cast value at the specified key in the map
+// if present and the correct type; otherwise, it returns the default value for
+// T.
+func valueOrDefault[T any](m map[string]interface{}, key string) T {
+ if v, ok := m[key].(T); ok {
+ return v
+ }
+ return *new(T)
+}
diff --git a/internal/tracing/errors.go b/internal/tracing/errors.go
new file mode 100644
index 00000000000..9fa615054c0
--- /dev/null
+++ b/internal/tracing/errors.go
@@ -0,0 +1,29 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "go.opentelemetry.io/otel"
+)
+
+// skipErrors is a no-op otel.ErrorHandler.
+type skipErrors struct{}
+
+// Handle does nothing, ignoring any errors passed to it.
+func (skipErrors) Handle(_ error) {}
+
+var _ otel.ErrorHandler = skipErrors{}
diff --git a/internal/tracing/keyboard_metrics.go b/internal/tracing/keyboard_metrics.go
new file mode 100644
index 00000000000..2e5120fbea7
--- /dev/null
+++ b/internal/tracing/keyboard_metrics.go
@@ -0,0 +1,35 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+func KeyboardMetrics(ctx context.Context, enabled, isDockerDesktopActive bool) {
+ commandAvailable := []string{}
+ if isDockerDesktopActive {
+ commandAvailable = append(commandAvailable, "gui")
+ commandAvailable = append(commandAvailable, "gui/composeview")
+ }
+
+ AddAttributeToSpan(ctx,
+ attribute.Bool("navmenu.enabled", enabled),
+ attribute.StringSlice("navmenu.command_available", commandAvailable))
+}
diff --git a/internal/tracing/mux.go b/internal/tracing/mux.go
new file mode 100644
index 00000000000..1a09ef1d96a
--- /dev/null
+++ b/internal/tracing/mux.go
@@ -0,0 +1,73 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+type MuxExporter struct {
+ exporters []sdktrace.SpanExporter
+}
+
+func (m MuxExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+ var (
+ wg sync.WaitGroup
+ errMu sync.Mutex
+ errs = make([]error, 0, len(m.exporters))
+ )
+
+ for _, exporter := range m.exporters {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := exporter.ExportSpans(ctx, spans); err != nil {
+ errMu.Lock()
+ errs = append(errs, err)
+ errMu.Unlock()
+ }
+ }()
+ }
+ wg.Wait()
+ return errors.Join(errs...)
+}
+
+func (m MuxExporter) Shutdown(ctx context.Context) error {
+ var (
+ wg sync.WaitGroup
+ errMu sync.Mutex
+ errs = make([]error, 0, len(m.exporters))
+ )
+
+ for _, exporter := range m.exporters {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := exporter.Shutdown(ctx); err != nil {
+ errMu.Lock()
+ errs = append(errs, err)
+ errMu.Unlock()
+ }
+ }()
+ }
+ wg.Wait()
+ return errors.Join(errs...)
+}
diff --git a/internal/tracing/tracing.go b/internal/tracing/tracing.go
new file mode 100644
index 00000000000..cab8ee3b29c
--- /dev/null
+++ b/internal/tracing/tracing.go
@@ -0,0 +1,145 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/docker/compose/v5/internal"
+ "go.opentelemetry.io/otel/attribute"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/util/tracing/detect"
+ _ "github.com/moby/buildkit/util/tracing/env" //nolint:blank-imports
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+func init() {
+ detect.ServiceName = "compose"
+ // do not log tracing errors to stdio
+ otel.SetErrorHandler(skipErrors{})
+}
+
+// OTLPConfig contains the necessary values to initialize an OTLP client
+// manually.
+//
+// This supports a minimal set of options based on what is necessary for
+// automatic OTEL configuration from Docker context metadata.
+type OTLPConfig struct {
+ Endpoint string
+}
+
+// ShutdownFunc flushes and stops an OTEL exporter.
+type ShutdownFunc func(ctx context.Context) error
+
+// envMap is a convenience type for OS environment variables.
+type envMap map[string]string
+
+func InitTracing(dockerCli command.Cli) (ShutdownFunc, error) {
+ // set global propagator to tracecontext (the default is no-op).
+ otel.SetTextMapPropagator(propagation.TraceContext{})
+ return InitProvider(dockerCli)
+}
+
+func InitProvider(dockerCli command.Cli) (ShutdownFunc, error) {
+ ctx := context.Background()
+
+ var errs []error
+ var exporters []sdktrace.SpanExporter
+
+ envClient, otelEnv := traceClientFromEnv()
+ if envClient != nil {
+ if envExporter, err := otlptrace.New(ctx, envClient); err != nil {
+ errs = append(errs, err)
+ } else if envExporter != nil {
+ exporters = append(exporters, envExporter)
+ }
+ }
+
+ if dcClient, err := traceClientFromDockerContext(dockerCli, otelEnv); err != nil {
+ errs = append(errs, err)
+ } else if dcClient != nil {
+ if dcExporter, err := otlptrace.New(ctx, dcClient); err != nil {
+ errs = append(errs, err)
+ } else if dcExporter != nil {
+ exporters = append(exporters, dcExporter)
+ }
+ }
+ if len(errs) != 0 {
+ return nil, errors.Join(errs...)
+ }
+
+ res, err := resource.New(
+ ctx,
+ resource.WithAttributes(
+ semconv.ServiceName("compose"),
+ semconv.ServiceVersion(internal.Version),
+ attribute.String("docker.context", dockerCli.CurrentContext()),
+ ),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create resource: %w", err)
+ }
+
+ muxExporter := MuxExporter{exporters: exporters}
+ tracerProvider := sdktrace.NewTracerProvider(
+ sdktrace.WithResource(res),
+ sdktrace.WithBatcher(muxExporter),
+ )
+ otel.SetTracerProvider(tracerProvider)
+
+ // Shutdown will flush any remaining spans and shut down the exporter.
+ return tracerProvider.Shutdown, nil
+}
+
+// traceClientFromEnv creates a GRPC OTLP client based on OS environment
+// variables.
+//
+// https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
+func traceClientFromEnv() (otlptrace.Client, envMap) {
+ hasOtelEndpointInEnv := false
+ otelEnv := make(map[string]string)
+ for _, kv := range os.Environ() {
+ k, v, ok := strings.Cut(kv, "=")
+ if !ok {
+ continue
+ }
+ if strings.HasPrefix(k, "OTEL_") {
+ otelEnv[k] = v
+ if strings.HasSuffix(k, "ENDPOINT") {
+ hasOtelEndpointInEnv = true
+ }
+ }
+ }
+
+ if !hasOtelEndpointInEnv {
+ return nil, nil
+ }
+
+ client := otlptracegrpc.NewClient()
+ return client, otelEnv
+}
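
Finally, a hedged sketch of wiring `InitTracing` into a command entrypoint. The `setupTracing` helper is hypothetical, and it assumes an already-initialized `command.Cli` is available from the surrounding CLI code.

```go
package tracingsetup

import (
	"context"
	"time"

	"github.com/docker/cli/cli/command"
	"github.com/docker/compose/v5/internal/tracing"
)

// setupTracing is a hypothetical helper: it initializes OTEL tracing from the
// environment and the active Docker context, and returns a cleanup function
// that flushes buffered spans on exit.
func setupTracing(dockerCli command.Cli) (func(), error) {
	shutdown, err := tracing.InitTracing(dockerCli)
	if err != nil {
		return nil, err
	}
	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		_ = shutdown(ctx)
	}, nil
}
```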
diff --git a/internal/tracing/tracing_test.go b/internal/tracing/tracing_test.go
new file mode 100644
index 00000000000..557f13b4727
--- /dev/null
+++ b/internal/tracing/tracing_test.go
@@ -0,0 +1,60 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing_test
+
+import (
+ "testing"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/context/store"
+ "github.com/stretchr/testify/require"
+
+ "github.com/docker/compose/v5/internal/tracing"
+)
+
+var testStoreCfg = store.NewConfig(
+ func() interface{} {
+ return &map[string]interface{}{}
+ },
+)
+
+func TestExtractOtelFromContext(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Requires filesystem access")
+ }
+
+ dir := t.TempDir()
+
+ st := store.New(dir, testStoreCfg)
+ err := st.CreateOrUpdate(store.Metadata{
+ Name: "test",
+ Metadata: command.DockerContext{
+ Description: t.Name(),
+ AdditionalFields: map[string]interface{}{
+ "otel": map[string]interface{}{
+ "OTEL_EXPORTER_OTLP_ENDPOINT": "localhost:1234",
+ },
+ },
+ },
+ Endpoints: make(map[string]interface{}),
+ })
+ require.NoError(t, err)
+
+ cfg, err := tracing.ConfigFromDockerContext(st, "test")
+ require.NoError(t, err)
+ require.Equal(t, "localhost:1234", cfg.Endpoint)
+}
diff --git a/internal/tracing/wrap.go b/internal/tracing/wrap.go
new file mode 100644
index 00000000000..e525b738b42
--- /dev/null
+++ b/internal/tracing/wrap.go
@@ -0,0 +1,98 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package tracing
+
+import (
+ "context"
+
+ "github.com/acarl005/stripansi"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.19.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SpanWrapFunc wraps a function that takes a context with a trace.Span, marking the status as codes.Error if the
+// wrapped function returns an error.
+//
+// The context passed to the function is created from the span to ensure correct propagation.
+//
+// NOTE: This function is nearly identical to SpanWrapFuncForErrGroup, except the latter is designed specially for
+// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
+// adding even more levels of function wrapping/indirection.
+func SpanWrapFunc(spanName string, opts SpanOptions, fn func(ctx context.Context) error) func(context.Context) error {
+ return func(ctx context.Context) error {
+ ctx, span := otel.Tracer("").Start(ctx, spanName, opts.SpanStartOptions()...)
+ defer span.End()
+
+ if err := fn(ctx); err != nil {
+ span.SetStatus(codes.Error, err.Error())
+ return err
+ }
+
+ span.SetStatus(codes.Ok, "")
+ return nil
+ }
+}
+
+// SpanWrapFuncForErrGroup wraps a function that takes a context with a trace.Span, marking the status as codes.Error
+// if the wrapped function returns an error.
+//
+// The context passed to the function is created from the span to ensure correct propagation.
+//
+// NOTE: This function is nearly identical to SpanWrapFunc, except this function is designed specially for
+// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
+// adding even more levels of function wrapping/indirection.
+func SpanWrapFuncForErrGroup(ctx context.Context, spanName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
+ return func() error {
+ ctx, span := otel.Tracer("").Start(ctx, spanName, opts.SpanStartOptions()...)
+ defer span.End()
+
+ if err := fn(ctx); err != nil {
+ span.SetStatus(codes.Error, err.Error())
+ return err
+ }
+
+ span.SetStatus(codes.Ok, "")
+ return nil
+ }
+}
+
+// EventWrapFuncForErrGroup invokes a function and records an event, optionally including the returned
+// error as the "exception message" on the event.
+//
+// This is intended for lightweight usage to wrap errgroup.Group calls where a full span is not desired.
+func EventWrapFuncForErrGroup(ctx context.Context, eventName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
+ return func() error {
+ span := trace.SpanFromContext(ctx)
+ eventOpts := opts.EventOptions()
+
+ err := fn(ctx)
+ if err != nil {
+ eventOpts = append(eventOpts, trace.WithAttributes(semconv.ExceptionMessage(stripansi.Strip(err.Error()))))
+ }
+ span.AddEvent(eventName, eventOpts...)
+
+ return err
+ }
+}
+
+func AddAttributeToSpan(ctx context.Context, attr ...attribute.KeyValue) {
+ span := trace.SpanFromContext(ctx)
+ span.SetAttributes(attr...)
+}
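
Illustrative sketch (not part of the patch) of how SpanWrapFuncForErrGroup slots into errgroup-based fan-out. It assumes the code lives inside the Compose module (internal/tracing cannot be imported from outside), that an empty tracing.SpanOptions literal is a valid zero value, and that the span name and start callback are placeholders.

package compose

import (
	"context"

	"golang.org/x/sync/errgroup"

	"github.com/docker/compose/v5/internal/tracing"
)

// startAllExample starts each service concurrently; every task is wrapped so
// it records its own child span and error status via SpanWrapFuncForErrGroup.
func startAllExample(ctx context.Context, services []string, start func(context.Context, string) error) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, name := range services {
		name := name // capture loop variable (pre-Go 1.22 semantics)
		eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/start/"+name, tracing.SpanOptions{}, func(ctx context.Context) error {
			return start(ctx, name)
		}))
	}
	return eg.Wait()
}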
diff --git a/internal/variables.go b/internal/variables.go
index a1144843ae5..28b701cc2db 100644
--- a/internal/variables.go
+++ b/internal/variables.go
@@ -1,5 +1,5 @@
/*
- Copyright 2020 Docker Compose CLI authors
+ Copyright 2023 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +16,5 @@
package internal
-var (
- // Version is the version of the CLI injected in compilation time
- Version = "dev"
-)
+// Version is the version of the CLI injected at compilation time
+var Version = "dev"
diff --git a/packaging/LICENSE b/packaging/LICENSE
deleted file mode 100644
index fcf6aa8926d..00000000000
--- a/packaging/LICENSE
+++ /dev/null
@@ -1,2 +0,0 @@
-The Docker End User License Agreement (https://www.docker.com/legal/docker-software-end-user-license-agreement) describes Docker's Terms for this software.
-By downloading, accessing, or using this software you expressly accept and agree to the Terms set out in the Docker End User License Agreement.
diff --git a/pkg/api/api.go b/pkg/api/api.go
index 510a1391e66..aed77af1523 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -19,22 +19,75 @@ package api
import (
"context"
"fmt"
+ "io"
+ "slices"
"strings"
"time"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/platforms"
+ "github.com/docker/cli/opts"
+ "github.com/docker/docker/api/types/volume"
)
-// Service manages a compose project
-type Service interface {
+// LoadListener receives events during project loading.
+// Events include:
+// - "extends": when a service extends another (metadata: service info)
+// - "include": when including external compose files (metadata: {"path": StringList})
+//
+// Multiple listeners can be registered, and all will be notified of events.
+type LoadListener func(event string, metadata map[string]any)
+
+// ProjectLoadOptions configures how a Compose project should be loaded
+type ProjectLoadOptions struct {
+ // ProjectName to use, or empty to infer from directory
+ ProjectName string
+ // ConfigPaths are paths to compose files
+ ConfigPaths []string
+ // WorkingDir is the project directory
+ WorkingDir string
+ // EnvFiles are paths to .env files
+ EnvFiles []string
+ // Profiles to activate
+ Profiles []string
+ // Services to select (empty = all)
+ Services []string
+ // Offline mode disables remote resource loading
+ Offline bool
+ // All includes all resources (not just those used by services)
+ All bool
+ // Compatibility enables v1 compatibility mode
+ Compatibility bool
+
+ // ProjectOptionsFns are compose-go project options to apply.
+ // Use cli.WithInterpolation(false), cli.WithNormalization(false), etc.
+ // This is optional - pass nil or empty slice to use defaults.
+ ProjectOptionsFns []cli.ProjectOptionsFn
+
+ // LoadListeners receive events during project loading.
+ // All registered listeners will be notified of events.
+ // This is optional - pass nil or empty slice if not needed.
+ LoadListeners []LoadListener
+
+ OCI OCIOptions
+}
+
+type OCIOptions struct {
+ InsecureRegistries []string
+}
+
+// Compose is the API interface one can use to programmatically use docker/compose in third-party software
+// Use [compose.NewComposeService] to get an actual instance
+type Compose interface {
// Build executes the equivalent to a `compose build`
Build(ctx context.Context, project *types.Project, options BuildOptions) error
- // Push executes the equivalent ot a `compose push`
+ // Push executes the equivalent to a `compose push`
Push(ctx context.Context, project *types.Project, options PushOptions) error
// Pull executes the equivalent of a `compose pull`
- Pull(ctx context.Context, project *types.Project, opts PullOptions) error
+ Pull(ctx context.Context, project *types.Project, options PullOptions) error
// Create executes the equivalent to a `compose create`
- Create(ctx context.Context, project *types.Project, opts CreateOptions) error
+ Create(ctx context.Context, project *types.Project, options CreateOptions) error
// Start executes the equivalent to a `compose start`
Start(ctx context.Context, projectName string, options StartOptions) error
// Restart restarts containers
@@ -51,36 +104,97 @@ type Service interface {
Ps(ctx context.Context, projectName string, options PsOptions) ([]ContainerSummary, error)
// List executes the equivalent to a `docker stack ls`
List(ctx context.Context, options ListOptions) ([]Stack, error)
- // Convert translate compose model into backend's native format
- Convert(ctx context.Context, project *types.Project, options ConvertOptions) ([]byte, error)
// Kill executes the equivalent to a `compose kill`
- Kill(ctx context.Context, project string, options KillOptions) error
+ Kill(ctx context.Context, projectName string, options KillOptions) error
// RunOneOffContainer creates a service oneoff container and starts its dependencies
RunOneOffContainer(ctx context.Context, project *types.Project, opts RunOptions) (int, error)
// Remove executes the equivalent to a `compose rm`
- Remove(ctx context.Context, project string, options RemoveOptions) error
+ Remove(ctx context.Context, projectName string, options RemoveOptions) error
// Exec executes a command in a running service container
- Exec(ctx context.Context, project string, opts RunOptions) (int, error)
+ Exec(ctx context.Context, projectName string, options RunOptions) (int, error)
+ // Attach STDIN,STDOUT,STDERR to a running service container
+ Attach(ctx context.Context, projectName string, options AttachOptions) error
// Copy copies a file/folder between a service container and the local filesystem
- Copy(ctx context.Context, project string, options CopyOptions) error
+ Copy(ctx context.Context, projectName string, options CopyOptions) error
// Pause executes the equivalent to a `compose pause`
- Pause(ctx context.Context, project string, options PauseOptions) error
+ Pause(ctx context.Context, projectName string, options PauseOptions) error
// UnPause executes the equivalent to a `compose unpause`
- UnPause(ctx context.Context, project string, options PauseOptions) error
+ UnPause(ctx context.Context, projectName string, options PauseOptions) error
// Top executes the equivalent to a `compose top`
Top(ctx context.Context, projectName string, services []string) ([]ContainerProcSummary, error)
// Events executes the equivalent to a `compose events`
- Events(ctx context.Context, project string, options EventsOptions) error
+ Events(ctx context.Context, projectName string, options EventsOptions) error
// Port executes the equivalent to a `compose port`
- Port(ctx context.Context, project string, service string, port int, options PortOptions) (string, int, error)
+ Port(ctx context.Context, projectName string, service string, port uint16, options PortOptions) (string, int, error)
+ // Publish executes the equivalent to a `compose publish`
+ Publish(ctx context.Context, project *types.Project, repository string, options PublishOptions) error
// Images executes the equivalent of a `compose images`
- Images(ctx context.Context, projectName string, options ImagesOptions) ([]ImageSummary, error)
+ Images(ctx context.Context, projectName string, options ImagesOptions) (map[string]ImageSummary, error)
+ // Watch services' development context and sync/notify/rebuild/restart on changes
+ Watch(ctx context.Context, project *types.Project, options WatchOptions) error
+ // Viz generates a graphviz graph of the project services
+ Viz(ctx context.Context, project *types.Project, options VizOptions) (string, error)
+ // Wait blocks until at least one of the services' container exits
+ Wait(ctx context.Context, projectName string, options WaitOptions) (int64, error)
+ // Scale manages the number of container instances running per service
+ Scale(ctx context.Context, project *types.Project, options ScaleOptions) error
+ // Export a service container's filesystem as a tar archive
+ Export(ctx context.Context, projectName string, options ExportOptions) error
+ // Create a new image from a service container's changes
+ Commit(ctx context.Context, projectName string, options CommitOptions) error
+ // Generate generates a Compose Project from existing containers
+ Generate(ctx context.Context, options GenerateOptions) (*types.Project, error)
+ // Volumes executes the equivalent to a `docker volume ls`
+ Volumes(ctx context.Context, project string, options VolumesOptions) ([]VolumesSummary, error)
+ // LoadProject loads and validates a Compose project from configuration files.
+ LoadProject(ctx context.Context, options ProjectLoadOptions) (*types.Project, error)
+}
+
+type VolumesOptions struct {
+ Services []string
+}
+
+type VolumesSummary = *volume.Volume
+
+type ScaleOptions struct {
+ Services []string
+}
+
+type WaitOptions struct {
+ // Services passed in the command line to be waited
+ Services []string
+ // Executes a down when a container exits
+ DownProjectOnContainerExit bool
+}
+
+type VizOptions struct {
+ // IncludeNetworks if true, network names a container is attached to should appear in the graph node
+ IncludeNetworks bool
+ // IncludePorts if true, ports a container exposes should appear in the graph node
+ IncludePorts bool
+ // IncludeImageName if true, name of the image used to create a container should appear in the graph node
+ IncludeImageName bool
+ // Indentation string to be used to indent graphviz code, e.g. "\t", " "
+ Indentation string
+}
+
+// WatchLogger is a reserved name to log watch events
+const WatchLogger = "#watch"
+
+// WatchOptions group options of the Watch API
+type WatchOptions struct {
+ Build *BuildOptions
+ LogTo LogConsumer
+ Prune bool
+ Services []string
}
// BuildOptions group options of the Build API
type BuildOptions struct {
// Pull always attempt to pull a newer version of the image
Pull bool
+ // Push pushes service images
+ Push bool
// Progress set type of progress output ("auto", "plain", "tty")
Progress string
// Args set build-time args
@@ -91,12 +205,62 @@ type BuildOptions struct {
Quiet bool
// Services passed in the command line to be built
Services []string
+ // Deps also builds the selected services' dependencies
+ Deps bool
// Ssh authentications passed in the command line
SSHs []types.SSHKey
+ // Memory limit for the build container
+ Memory int64
+ // Builder name passed in the command line
+ Builder string
+ // Print doesn't actually run the builder but prints the equivalent build config
+ Print bool
+ // Check lets the builder validate the build configuration
+ Check bool
+ // Attestations enables generation of attestations
+ Attestations bool
+ // Provenance generates a provenance attestation
+ Provenance string
+ // SBOM generates an SBOM attestation
+ SBOM string
+ // Out is the stream to write build progress
+ Out io.Writer
+}
+
+// Apply mutates project according to build options
+func (o BuildOptions) Apply(project *types.Project) error {
+ platform := project.Environment["DOCKER_DEFAULT_PLATFORM"]
+ for name, service := range project.Services {
+ if service.Provider == nil && service.Image == "" && service.Build == nil {
+ return fmt.Errorf("invalid service %q. Must specify either image or build", name)
+ }
+
+ if service.Build == nil {
+ continue
+ }
+ if platform != "" {
+ if len(service.Build.Platforms) > 0 && !slices.Contains(service.Build.Platforms, platform) {
+ return fmt.Errorf("service %q build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: %s", name, platform)
+ }
+ service.Platform = platform
+ }
+ if service.Platform != "" {
+ if len(service.Build.Platforms) > 0 && !slices.Contains(service.Build.Platforms, service.Platform) {
+ return fmt.Errorf("service %q build configuration does not support platform: %s", name, service.Platform)
+ }
+ }
+
+ service.Build.Pull = service.Build.Pull || o.Pull
+ service.Build.NoCache = service.Build.NoCache || o.NoCache
+
+ project.Services[name] = service
+ }
+ return nil
}
// CreateOptions group options of the Create API
type CreateOptions struct {
+ Build *BuildOptions
// Services defines the services user interacts with
Services []string
// Remove legacy containers for services that are not defined in the project
@@ -109,7 +273,7 @@ type CreateOptions struct {
RecreateDependencies string
// Inherit reuse anonymous volumes from previous container
Inherit bool
- // Timeout set delay to wait for container to gracelfuly stop before sending SIGKILL
+ // Timeout set delay to wait for container to gracefully stop before sending SIGKILL
Timeout *time.Duration
// QuietPull makes the pulling process quiet
QuietPull bool
@@ -117,30 +281,49 @@ type CreateOptions struct {
// StartOptions group options of the Start API
type StartOptions struct {
- // Project is the compose project used to define this app. Might be nil if user ran `start` just with project name
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
Project *types.Project
// Attach to container and forward logs if not nil
Attach LogConsumer
// AttachTo set the services to attach to
AttachTo []string
- // CascadeStop stops the application when a container stops
- CascadeStop bool
+ // OnExit defines behavior when a container stops
+ OnExit Cascade
// ExitCodeFrom return exit code from specified service
ExitCodeFrom string
// Wait won't return until containers reached the running|healthy state
- Wait bool
+ Wait bool
+ WaitTimeout time.Duration
+ // Services passed in the command line to be started
+ Services []string
+ Watch bool
+ NavigationMenu bool
}
+type Cascade int
+
+const (
+ CascadeIgnore Cascade = iota
+ CascadeStop Cascade = iota
+ CascadeFail Cascade = iota
+)
+
// RestartOptions group options of the Restart API
type RestartOptions struct {
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
// Timeout override container restart timeout
Timeout *time.Duration
// Services passed in the command line to be restarted
Services []string
+ // NoDeps ignores services dependencies
+ NoDeps bool
}
// StopOptions group options of the Stop API
type StopOptions struct {
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
// Timeout override container stop timeout
Timeout *time.Duration
// Services passed in the command line to be stopped
@@ -165,25 +348,32 @@ type DownOptions struct {
Images string
// Volumes remove volumes, both declared in the `volumes` section and anonymous ones
Volumes bool
+ // Services passed in the command line to be stopped
+ Services []string
}
-// ConvertOptions group options of the Convert API
-type ConvertOptions struct {
+// ConfigOptions group options of the Config API
+type ConfigOptions struct {
// Format define the output format used to dump converted application model (json|yaml)
Format string
// Output defines the path to save the application model
Output string
+ // Resolve image reference to digests
+ ResolveImageDigests bool
}
// PushOptions group options of the Push API
type PushOptions struct {
+ Quiet bool
IgnoreFailures bool
+ ImageMandatory bool
}
// PullOptions group options of the Pull API
type PullOptions struct {
- Quiet bool
- IgnoreFailures bool
+ Quiet bool
+ IgnoreFailures bool
+ IgnoreBuildable bool
}
// ImagesOptions group options of the Images API
@@ -193,16 +383,24 @@ type ImagesOptions struct {
// KillOptions group options of the Kill API
type KillOptions struct {
+ // RemoveOrphans will clean up containers that are not declared in the compose model but carry the same labels
+ RemoveOrphans bool
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
// Services passed in the command line to be killed
Services []string
// Signal to send to containers
Signal string
+ // All can be set to true to try to kill all found containers, independently of their state
+ All bool
}
// RemoveOptions group options of the Remove API
type RemoveOptions struct {
- // DryRun just list removable resources
- DryRun bool
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
+ // Stop option passed in the command line
+ Stop bool
// Volumes remove anonymous volumes
Volumes bool
// Force don't ask to confirm removal
@@ -213,6 +411,9 @@ type RemoveOptions struct {
// RunOptions group options of the Run API
type RunOptions struct {
+ CreateOptions
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
Name string
Service string
Command []string
@@ -224,20 +425,32 @@ type RunOptions struct {
WorkingDir string
User string
Environment []string
+ CapAdd []string
+ CapDrop []string
Labels types.Labels
Privileged bool
UseNetworkAliases bool
NoDeps bool
- // QuietPull makes the pulling process quiet
- QuietPull bool
// used by exec
Index int
}
+// AttachOptions group options of the Attach API
+type AttachOptions struct {
+ Project *types.Project
+ Service string
+ Index int
+ DetachKeys string
+ NoStdin bool
+ Proxy bool
+}
+
// EventsOptions group options of the Events API
type EventsOptions struct {
Services []string
Consumer func(event Event) error
+ Since string
+ Until string
}
// Event is a container runtime event served by Events API
@@ -255,6 +468,33 @@ type PortOptions struct {
Index int
}
+// OCIVersion controls manifest generation to ensure compatibility
+// with different registries.
+//
+// Currently, this is not exposed as an option to the user – Compose uses
+// OCI 1.0 mode automatically for ECR registries based on domain and OCI 1.1
+// for all other registries.
+//
+// There are likely other popular registries that do not support the OCI 1.1
+// format, so it might make sense to expose this as a CLI flag or see if
+// there's a way to generically probe the registry for support level.
+type OCIVersion string
+
+const (
+ OCIVersion1_0 OCIVersion = "1.0"
+ OCIVersion1_1 OCIVersion = "1.1"
+)
+
+// PublishOptions group options of the Publish API
+type PublishOptions struct {
+ ResolveImageDigests bool
+ Application bool
+ WithEnvironment bool
+ OCIVersion OCIVersion
+ // Use plain HTTP to access the registry. Should only be used for testing purposes
+ InsecureRegistry bool
+}
+
func (e Event) String() string {
t := e.Timestamp.Format("2006-01-02 15:04:05.000000")
var attr []string
@@ -262,7 +502,6 @@ func (e Event) String() string {
attr = append(attr, fmt.Sprintf("%s=%s", k, v))
}
return fmt.Sprintf("%s container %s %s (%s)\n", t, e.Status, e.Container, strings.Join(attr, ", "))
-
}
// ListOptions group options of the ls API
@@ -272,6 +511,7 @@ type ListOptions struct {
// PsOptions group options of the Ps API
type PsOptions struct {
+ Project *types.Project
All bool
Services []string
}
@@ -296,15 +536,25 @@ type PortPublisher struct {
// ContainerSummary hold high-level description of a container
type ContainerSummary struct {
- ID string
- Name string
- Command string
- Project string
- Service string
- State string
- Health string
- ExitCode int
- Publishers PortPublishers
+ ID string
+ Name string
+ Names []string
+ Image string
+ Command string
+ Project string
+ Service string
+ Created int64
+ State string
+ Status string
+ Health string
+ ExitCode int
+ Publishers PortPublishers
+ Labels map[string]string
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Mounts []string
+ Networks []string
+ LocalVolumes int
}
// PortPublishers is a slice of PortPublisher
@@ -342,15 +592,19 @@ type ContainerProcSummary struct {
Name string
Processes [][]string
Titles []string
+ Service string
+ Replica string
}
// ImageSummary holds container image description
type ImageSummary struct {
- ID string
- ContainerName string
- Repository string
- Tag string
- Size int64
+ ID string
+ Repository string
+ Tag string
+ Platform platforms.Platform
+ Size int64
+ Created *time.Time
+ LastTagTime time.Time
}
// ServiceStatus hold status about a service
@@ -365,6 +619,8 @@ type ServiceStatus struct {
// LogOptions defines optional parameters for the `Log` API
type LogOptions struct {
+ Project *types.Project
+ Index int
Services []string
Tail string
Since string
@@ -377,6 +633,35 @@ type LogOptions struct {
type PauseOptions struct {
// Services passed in the command line to be started
Services []string
+ // Project is the compose project used to define this app. Might be nil if user ran command just with project name
+ Project *types.Project
+}
+
+// ExportOptions group options of the Export API
+type ExportOptions struct {
+ Service string
+ Index int
+ Output string
+}
+
+// CommitOptions group options of the Commit API
+type CommitOptions struct {
+ Service string
+ Reference string
+
+ Pause bool
+ Comment string
+ Author string
+ Changes opts.ListOpts
+
+ Index int
+}
+
+type GenerateOptions struct {
+ // ProjectName to set in the Compose file
+ ProjectName string
+ // Containers passed in the command line to be used as reference for service definition
+ Containers []string
}
const (
@@ -414,9 +699,9 @@ type Stack struct {
// LogConsumer is a callback to process log messages from services
type LogConsumer interface {
- Log(service, container, message string)
+ Log(containerName, message string)
+ Err(containerName, message string)
Status(container, msg string)
- Register(container string)
}
// ContainerEventListener is a callback to process ContainerEvent from services
@@ -425,23 +710,50 @@ type ContainerEventListener func(event ContainerEvent)
// ContainerEvent notify an event has been collected on source container implementing Service
type ContainerEvent struct {
Type int
- Container string
- Service string
- Line string
- // ContainerEventExit only
+ Time int64
+ Container *ContainerSummary
+ // Source is the name of the container _without the project prefix_.
+ //
+ // This is only suitable for display purposes within Compose, as it's
+ // not guaranteed to be unique across services.
+ Source string
+ ID string
+ Service string
+ Line string
+ // ExitCode is only set on ContainerEventExited events
ExitCode int
Restarting bool
}
const (
- // ContainerEventLog is a ContainerEvent of type log. Line is set
+ // ContainerEventLog is a ContainerEvent of type log on stdout. Line is set
ContainerEventLog = iota
- // ContainerEventAttach is a ContainerEvent of type attach. First event sent about a container
- ContainerEventAttach
+ // ContainerEventErr is a ContainerEvent of type log on stderr. Line is set
+ ContainerEventErr
+ // ContainerEventStarted let consumer know a container has been started
+ ContainerEventStarted
+ // ContainerEventRestarted let consumer know a container has been restarted
+ ContainerEventRestarted
// ContainerEventStopped is a ContainerEvent of type stopped.
ContainerEventStopped
- // ContainerEventExit is a ContainerEvent of type exit. ExitCode is set
- ContainerEventExit
- // UserCancel user cancelled compose up, we are stopping containers
- UserCancel
+ // ContainerEventCreated let consumer know a new container has been created
+ ContainerEventCreated
+ // ContainerEventRecreated let consumer know a container stopped but is being replaced
+ ContainerEventRecreated
+ // ContainerEventExited is a ContainerEvent of type exit. ExitCode is set
+ ContainerEventExited
+ // HookEventLog is a ContainerEvent of type log produced by a service lifecycle hook. Line is set
+ HookEventLog
)
+
+// Separator is used for naming components
+var Separator = "-"
+
+// GetImageNameOrDefault computes the default image name for a service, used to tag built images
+func GetImageNameOrDefault(service types.ServiceConfig, projectName string) string {
+ imageName := service.Image
+ if imageName == "" {
+ imageName = projectName + Separator + service.Name
+ }
+ return imageName
+}
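
Illustrative sketch (not part of the patch) of how BuildOptions.Apply and GetImageNameOrDefault behave on a minimal project. It assumes compose-go v2's map-based types.Services and that NoCache, referenced by Apply above, is a BuildOptions field declared outside this excerpt; project and service names are placeholders.

package api_test

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/types"

	"github.com/docker/compose/v5/pkg/api"
)

// Example_buildOptionsApply propagates CLI build flags into the model:
// DOCKER_DEFAULT_PLATFORM is copied onto the service, Pull/NoCache are merged
// into its build config, and the default image tag is <project>-<service>.
func Example_buildOptionsApply() {
	project := &types.Project{
		Name:        "demo",
		Environment: types.Mapping{"DOCKER_DEFAULT_PLATFORM": "linux/amd64"},
		Services: types.Services{
			"web": {Name: "web", Build: &types.BuildConfig{Context: "."}},
		},
	}
	opts := api.BuildOptions{Pull: true, NoCache: true}
	if err := opts.Apply(project); err != nil {
		panic(err)
	}
	web := project.Services["web"]
	fmt.Println(web.Platform, web.Build.Pull, web.Build.NoCache)
	fmt.Println(api.GetImageNameOrDefault(web, project.Name))
	// Output:
	// linux/amd64 true true
	// demo-web
}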
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index 0d963315137..fc44abe7f1a 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -19,7 +19,7 @@ package api
import (
"testing"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"gotest.tools/v3/assert"
)
diff --git a/pkg/api/context.go b/pkg/api/context.go
new file mode 100644
index 00000000000..af49c5d2433
--- /dev/null
+++ b/pkg/api/context.go
@@ -0,0 +1,32 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package api
+
+// ContextInfo provides Docker context information for advanced scenarios
+type ContextInfo interface {
+ // CurrentContext returns the name of the current Docker context
+ // Returns "default" for simple clients without context support
+ CurrentContext() string
+
+ // ServerOSType returns the Docker daemon's operating system (linux/windows/darwin)
+ // Used for OS-specific compatibility checks
+ ServerOSType() string
+
+ // BuildKitEnabled determines whether BuildKit should be used for builds
+ // Checks DOCKER_BUILDKIT env var, config, and daemon capabilities
+ BuildKitEnabled() (bool, error)
+}
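
Illustrative sketch (not part of the patch): a client that embeds Compose without a full docker CLI can satisfy ContextInfo with static answers; the values returned here are assumptions.

package api_test

import "github.com/docker/compose/v5/pkg/api"

// staticContext answers the ContextInfo queries with fixed values: a single
// default context, a Linux daemon, and BuildKit enabled.
type staticContext struct{}

func (staticContext) CurrentContext() string         { return "default" }
func (staticContext) ServerOSType() string           { return "linux" }
func (staticContext) BuildKitEnabled() (bool, error) { return true, nil }

// Compile-time check that the sketch implements the interface.
var _ api.ContextInfo = staticContext{}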
diff --git a/pkg/api/errors.go b/pkg/api/errors.go
index 4cdcd80094b..7bf4d4a02c3 100644
--- a/pkg/api/errors.go
+++ b/pkg/api/errors.go
@@ -17,11 +17,11 @@
package api
import (
- "github.com/pkg/errors"
+ "errors"
)
const (
- //ExitCodeLoginRequired exit code when command cannot execute because it requires cloud login
+ // ExitCodeLoginRequired exit code when command cannot execute because it requires cloud login
// This will be used by VSCode to detect when creating context if the user needs to login first
ExitCodeLoginRequired = 5
)
@@ -35,12 +35,7 @@ var (
ErrForbidden = errors.New("forbidden")
// ErrUnknown is returned when the error type is unmapped
ErrUnknown = errors.New("unknown")
- // ErrLoginFailed is returned when login failed
- ErrLoginFailed = errors.New("login failed")
- // ErrLoginRequired is returned when login is required for a specific action
- ErrLoginRequired = errors.New("login required")
- // ErrNotImplemented is returned when a backend doesn't implement
- // an action
+ // ErrNotImplemented is returned when a backend doesn't implement an action
ErrNotImplemented = errors.New("not implemented")
// ErrUnsupportedFlag is returned when a backend doesn't support a flag
ErrUnsupportedFlag = errors.New("unsupported flag")
@@ -48,9 +43,8 @@ var (
ErrCanceled = errors.New("canceled")
// ErrParsingFailed is returned when a string cannot be parsed
ErrParsingFailed = errors.New("parsing failed")
- // ErrWrongContextType is returned when the caller tries to get a context
- // with the wrong type
- ErrWrongContextType = errors.New("wrong context type")
+ // ErrNoResources is returned when an operation didn't select any resource
+ ErrNoResources = errors.New("no resources")
)
// IsNotFoundError returns true if the unwrapped error is ErrNotFound
diff --git a/pkg/api/errors_test.go b/pkg/api/errors_test.go
index ea47edb6148..d221db3fdec 100644
--- a/pkg/api/errors_test.go
+++ b/pkg/api/errors_test.go
@@ -17,35 +17,36 @@
package api
import (
+ "errors"
+ "fmt"
"testing"
- "github.com/pkg/errors"
"gotest.tools/v3/assert"
)
func TestIsNotFound(t *testing.T) {
- err := errors.Wrap(ErrNotFound, `object "name"`)
+ err := fmt.Errorf(`object "name": %w`, ErrNotFound)
assert.Assert(t, IsNotFoundError(err))
assert.Assert(t, !IsNotFoundError(errors.New("another error")))
}
func TestIsAlreadyExists(t *testing.T) {
- err := errors.Wrap(ErrAlreadyExists, `object "name"`)
+ err := fmt.Errorf(`object "name": %w`, ErrAlreadyExists)
assert.Assert(t, IsAlreadyExistsError(err))
assert.Assert(t, !IsAlreadyExistsError(errors.New("another error")))
}
func TestIsForbidden(t *testing.T) {
- err := errors.Wrap(ErrForbidden, `object "name"`)
+ err := fmt.Errorf(`object "name": %w`, ErrForbidden)
assert.Assert(t, IsForbiddenError(err))
assert.Assert(t, !IsForbiddenError(errors.New("another error")))
}
func TestIsUnknown(t *testing.T) {
- err := errors.Wrap(ErrUnknown, `object "name"`)
+ err := fmt.Errorf(`object "name": %w`, ErrUnknown)
assert.Assert(t, IsUnknownError(err))
assert.Assert(t, !IsUnknownError(errors.New("another error")))
diff --git a/pkg/api/event.go b/pkg/api/event.go
new file mode 100644
index 00000000000..8a14fa3165d
--- /dev/null
+++ b/pkg/api/event.go
@@ -0,0 +1,103 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package api
+
+import (
+ "context"
+)
+
+// EventStatus indicates the status of an action
+type EventStatus int
+
+const (
+ // Working means that the current task is working
+ Working EventStatus = iota
+ // Done means that the current task is done
+ Done
+ // Warning means that the current task has warning
+ Warning
+ // Error means that the current task has errored
+ Error
+)
+
+// ResourceCompose is a special resource ID used when event applies to all resources in the application
+const ResourceCompose = "Compose"
+
+const (
+ StatusError = "Error"
+ StatusCreating = "Creating"
+ StatusStarting = "Starting"
+ StatusStarted = "Started"
+ StatusWaiting = "Waiting"
+ StatusHealthy = "Healthy"
+ StatusExited = "Exited"
+ StatusRestarting = "Restarting"
+ StatusRestarted = "Restarted"
+ StatusRunning = "Running"
+ StatusCreated = "Created"
+ StatusStopping = "Stopping"
+ StatusStopped = "Stopped"
+ StatusKilling = "Killing"
+ StatusKilled = "Killed"
+ StatusRemoving = "Removing"
+ StatusRemoved = "Removed"
+ StatusBuilding = "Building"
+ StatusBuilt = "Built"
+ StatusPulling = "Pulling"
+ StatusPulled = "Pulled"
+ StatusCommitting = "Committing"
+ StatusCommitted = "Committed"
+ StatusCopying = "Copying"
+ StatusCopied = "Copied"
+ StatusExporting = "Exporting"
+ StatusExported = "Exported"
+)
+
+// Resource represents status change and progress for a compose resource.
+type Resource struct {
+ ID string
+ ParentID string
+ Text string
+ Details string
+ Status EventStatus
+ Current int64
+ Percent int
+ Total int64
+}
+
+func (e *Resource) StatusText() string {
+ switch e.Status {
+ case Working:
+ return "Working"
+ case Warning:
+ return "Warning"
+ case Done:
+ return "Done"
+ default:
+ return "Error"
+ }
+}
+
+// EventProcessor is notified about Compose operations and tasks
+type EventProcessor interface {
+ // Start is triggered when a Compose operation starts, with its context
+ Start(ctx context.Context, operation string)
+ // On notifies about (sub)tasks and progress of the processing operation
+ On(events ...Resource)
+ // Done is triggered when a Compose operation completes
+ Done(operation string, success bool)
+}
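
Illustrative sketch (not part of the patch) of a minimal EventProcessor that just logs progress updates; the log format is arbitrary, and a real consumer would typically drive a progress UI instead.

package api_test

import (
	"context"
	"log"

	"github.com/docker/compose/v5/pkg/api"
)

// logProcessor prints every resource update it receives.
type logProcessor struct{}

func (logProcessor) Start(_ context.Context, operation string) {
	log.Printf("%s: started", operation)
}

func (logProcessor) On(events ...api.Resource) {
	for _, e := range events {
		log.Printf("%s %s: %s (%d/%d)", e.ID, e.StatusText(), e.Text, e.Current, e.Total)
	}
}

func (logProcessor) Done(operation string, success bool) {
	log.Printf("%s: done (success=%v)", operation, success)
}

// Compile-time check that the sketch implements the interface.
var _ api.EventProcessor = logProcessor{}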
diff --git a/pkg/api/labels.go b/pkg/api/labels.go
index c0ed9ea7b6e..3a0f684b98b 100644
--- a/pkg/api/labels.go
+++ b/pkg/api/labels.go
@@ -17,11 +17,9 @@
package api
import (
- "fmt"
-
"github.com/hashicorp/go-version"
- "github.com/docker/compose/v2/internal"
+ "github.com/docker/compose/v5/internal"
)
const (
@@ -51,8 +49,12 @@ const (
ImageDigestLabel = "com.docker.compose.image"
// DependenciesLabel stores service dependencies
DependenciesLabel = "com.docker.compose.depends_on"
- // VersionLabel stores the compose tool version used to run application
+ // VersionLabel stores the compose tool version used to build/run application
VersionLabel = "com.docker.compose.version"
+ // ImageBuilderLabel stores the builder (classic or BuildKit) used to produce the image.
+ ImageBuilderLabel = "com.docker.compose.image.builder"
+ // ContainerReplaceLabel is set when container is created to replace another container (recreated)
+ ContainerReplaceLabel = "com.docker.compose.replace"
)
// ComposeVersion is the compose tool version as declared by label VersionLabel
@@ -61,9 +63,6 @@ var ComposeVersion string
func init() {
v, err := version.NewVersion(internal.Version)
if err == nil {
- segments := v.Segments()
- if len(segments) > 2 {
- ComposeVersion = fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
- }
+ ComposeVersion = v.Core().String()
}
}
diff --git a/pkg/api/labels_test.go b/pkg/api/labels_test.go
new file mode 100644
index 00000000000..61b2d8b61eb
--- /dev/null
+++ b/pkg/api/labels_test.go
@@ -0,0 +1,35 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package api
+
+import (
+ "testing"
+
+ "github.com/docker/compose/v5/internal"
+ "github.com/hashicorp/go-version"
+ "gotest.tools/v3/assert"
+)
+
+func TestComposeVersionInitialization(t *testing.T) {
+ v, err := version.NewVersion(internal.Version)
+ if err != nil {
+ assert.Equal(t, "", ComposeVersion, "ComposeVersion should be empty for a non-semver internal version (e.g. 'devel')")
+ } else {
+ expected := v.Core().String()
+ assert.Equal(t, expected, ComposeVersion, "ComposeVersion should be the core of internal.Version")
+ }
+}
diff --git a/pkg/api/proxy.go b/pkg/api/proxy.go
deleted file mode 100644
index 44adf646ecf..00000000000
--- a/pkg/api/proxy.go
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package api
-
-import (
- "context"
-
- "github.com/compose-spec/compose-go/types"
-)
-
-// ServiceProxy implements Service by delegating to implementation functions. This allows lazy init and per-method overrides
-type ServiceProxy struct {
- BuildFn func(ctx context.Context, project *types.Project, options BuildOptions) error
- PushFn func(ctx context.Context, project *types.Project, options PushOptions) error
- PullFn func(ctx context.Context, project *types.Project, opts PullOptions) error
- CreateFn func(ctx context.Context, project *types.Project, opts CreateOptions) error
- StartFn func(ctx context.Context, projectName string, options StartOptions) error
- RestartFn func(ctx context.Context, projectName string, options RestartOptions) error
- StopFn func(ctx context.Context, projectName string, options StopOptions) error
- UpFn func(ctx context.Context, project *types.Project, options UpOptions) error
- DownFn func(ctx context.Context, projectName string, options DownOptions) error
- LogsFn func(ctx context.Context, projectName string, consumer LogConsumer, options LogOptions) error
- PsFn func(ctx context.Context, projectName string, options PsOptions) ([]ContainerSummary, error)
- ListFn func(ctx context.Context, options ListOptions) ([]Stack, error)
- ConvertFn func(ctx context.Context, project *types.Project, options ConvertOptions) ([]byte, error)
- KillFn func(ctx context.Context, project string, options KillOptions) error
- RunOneOffContainerFn func(ctx context.Context, project *types.Project, opts RunOptions) (int, error)
- RemoveFn func(ctx context.Context, project string, options RemoveOptions) error
- ExecFn func(ctx context.Context, project string, opts RunOptions) (int, error)
- CopyFn func(ctx context.Context, project string, options CopyOptions) error
- PauseFn func(ctx context.Context, project string, options PauseOptions) error
- UnPauseFn func(ctx context.Context, project string, options PauseOptions) error
- TopFn func(ctx context.Context, projectName string, services []string) ([]ContainerProcSummary, error)
- EventsFn func(ctx context.Context, project string, options EventsOptions) error
- PortFn func(ctx context.Context, project string, service string, port int, options PortOptions) (string, int, error)
- ImagesFn func(ctx context.Context, projectName string, options ImagesOptions) ([]ImageSummary, error)
- interceptors []Interceptor
-}
-
-// NewServiceProxy produces a ServiceProxy
-func NewServiceProxy() *ServiceProxy {
- return &ServiceProxy{}
-}
-
-// Interceptor allow to customize the compose types.Project before the actual Service method is executed
-type Interceptor func(ctx context.Context, project *types.Project)
-
-var _ Service = &ServiceProxy{}
-
-// WithService configure proxy to use specified Service as delegate
-func (s *ServiceProxy) WithService(service Service) *ServiceProxy {
- s.BuildFn = service.Build
- s.PushFn = service.Push
- s.PullFn = service.Pull
- s.CreateFn = service.Create
- s.StartFn = service.Start
- s.RestartFn = service.Restart
- s.StopFn = service.Stop
- s.UpFn = service.Up
- s.DownFn = service.Down
- s.LogsFn = service.Logs
- s.PsFn = service.Ps
- s.ListFn = service.List
- s.ConvertFn = service.Convert
- s.KillFn = service.Kill
- s.RunOneOffContainerFn = service.RunOneOffContainer
- s.RemoveFn = service.Remove
- s.ExecFn = service.Exec
- s.CopyFn = service.Copy
- s.PauseFn = service.Pause
- s.UnPauseFn = service.UnPause
- s.TopFn = service.Top
- s.EventsFn = service.Events
- s.PortFn = service.Port
- s.ImagesFn = service.Images
- return s
-}
-
-// WithInterceptor configures Interceptor to be applied to Service method execution
-func (s *ServiceProxy) WithInterceptor(interceptors ...Interceptor) *ServiceProxy {
- s.interceptors = append(s.interceptors, interceptors...)
- return s
-}
-
-// Build implements Service interface
-func (s *ServiceProxy) Build(ctx context.Context, project *types.Project, options BuildOptions) error {
- if s.BuildFn == nil {
- return ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.BuildFn(ctx, project, options)
-}
-
-// Push implements Service interface
-func (s *ServiceProxy) Push(ctx context.Context, project *types.Project, options PushOptions) error {
- if s.PushFn == nil {
- return ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.PushFn(ctx, project, options)
-}
-
-// Pull implements Service interface
-func (s *ServiceProxy) Pull(ctx context.Context, project *types.Project, options PullOptions) error {
- if s.PullFn == nil {
- return ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.PullFn(ctx, project, options)
-}
-
-// Create implements Service interface
-func (s *ServiceProxy) Create(ctx context.Context, project *types.Project, options CreateOptions) error {
- if s.CreateFn == nil {
- return ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.CreateFn(ctx, project, options)
-}
-
-// Start implements Service interface
-func (s *ServiceProxy) Start(ctx context.Context, projectName string, options StartOptions) error {
- if s.StartFn == nil {
- return ErrNotImplemented
- }
- return s.StartFn(ctx, projectName, options)
-}
-
-// Restart implements Service interface
-func (s *ServiceProxy) Restart(ctx context.Context, projectName string, options RestartOptions) error {
- if s.RestartFn == nil {
- return ErrNotImplemented
- }
- return s.RestartFn(ctx, projectName, options)
-}
-
-// Stop implements Service interface
-func (s *ServiceProxy) Stop(ctx context.Context, projectName string, options StopOptions) error {
- if s.StopFn == nil {
- return ErrNotImplemented
- }
- return s.StopFn(ctx, projectName, options)
-}
-
-// Up implements Service interface
-func (s *ServiceProxy) Up(ctx context.Context, project *types.Project, options UpOptions) error {
- if s.UpFn == nil {
- return ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.UpFn(ctx, project, options)
-}
-
-// Down implements Service interface
-func (s *ServiceProxy) Down(ctx context.Context, project string, options DownOptions) error {
- if s.DownFn == nil {
- return ErrNotImplemented
- }
- return s.DownFn(ctx, project, options)
-}
-
-// Logs implements Service interface
-func (s *ServiceProxy) Logs(ctx context.Context, projectName string, consumer LogConsumer, options LogOptions) error {
- if s.LogsFn == nil {
- return ErrNotImplemented
- }
- return s.LogsFn(ctx, projectName, consumer, options)
-}
-
-// Ps implements Service interface
-func (s *ServiceProxy) Ps(ctx context.Context, project string, options PsOptions) ([]ContainerSummary, error) {
- if s.PsFn == nil {
- return nil, ErrNotImplemented
- }
- return s.PsFn(ctx, project, options)
-}
-
-// List implements Service interface
-func (s *ServiceProxy) List(ctx context.Context, options ListOptions) ([]Stack, error) {
- if s.ListFn == nil {
- return nil, ErrNotImplemented
- }
- return s.ListFn(ctx, options)
-}
-
-// Convert implements Service interface
-func (s *ServiceProxy) Convert(ctx context.Context, project *types.Project, options ConvertOptions) ([]byte, error) {
- if s.ConvertFn == nil {
- return nil, ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.ConvertFn(ctx, project, options)
-}
-
-// Kill implements Service interface
-func (s *ServiceProxy) Kill(ctx context.Context, project string, options KillOptions) error {
- if s.KillFn == nil {
- return ErrNotImplemented
- }
- return s.KillFn(ctx, project, options)
-}
-
-// RunOneOffContainer implements Service interface
-func (s *ServiceProxy) RunOneOffContainer(ctx context.Context, project *types.Project, options RunOptions) (int, error) {
- if s.RunOneOffContainerFn == nil {
- return 0, ErrNotImplemented
- }
- for _, i := range s.interceptors {
- i(ctx, project)
- }
- return s.RunOneOffContainerFn(ctx, project, options)
-}
-
-// Remove implements Service interface
-func (s *ServiceProxy) Remove(ctx context.Context, project string, options RemoveOptions) error {
- if s.RemoveFn == nil {
- return ErrNotImplemented
- }
- return s.RemoveFn(ctx, project, options)
-}
-
-// Exec implements Service interface
-func (s *ServiceProxy) Exec(ctx context.Context, project string, options RunOptions) (int, error) {
- if s.ExecFn == nil {
- return 0, ErrNotImplemented
- }
- return s.ExecFn(ctx, project, options)
-}
-
-// Copy implements Service interface
-func (s *ServiceProxy) Copy(ctx context.Context, project string, options CopyOptions) error {
- if s.CopyFn == nil {
- return ErrNotImplemented
- }
- return s.CopyFn(ctx, project, options)
-}
-
-// Pause implements Service interface
-func (s *ServiceProxy) Pause(ctx context.Context, project string, options PauseOptions) error {
- if s.PauseFn == nil {
- return ErrNotImplemented
- }
- return s.PauseFn(ctx, project, options)
-}
-
-// UnPause implements Service interface
-func (s *ServiceProxy) UnPause(ctx context.Context, project string, options PauseOptions) error {
- if s.UnPauseFn == nil {
- return ErrNotImplemented
- }
- return s.UnPauseFn(ctx, project, options)
-}
-
-// Top implements Service interface
-func (s *ServiceProxy) Top(ctx context.Context, project string, services []string) ([]ContainerProcSummary, error) {
- if s.TopFn == nil {
- return nil, ErrNotImplemented
- }
- return s.TopFn(ctx, project, services)
-}
-
-// Events implements Service interface
-func (s *ServiceProxy) Events(ctx context.Context, project string, options EventsOptions) error {
- if s.EventsFn == nil {
- return ErrNotImplemented
- }
- return s.EventsFn(ctx, project, options)
-}
-
-// Port implements Service interface
-func (s *ServiceProxy) Port(ctx context.Context, project string, service string, port int, options PortOptions) (string, int, error) {
- if s.PortFn == nil {
- return "", 0, ErrNotImplemented
- }
- return s.PortFn(ctx, project, service, port, options)
-}
-
-// Images implements Service interface
-func (s *ServiceProxy) Images(ctx context.Context, project string, options ImagesOptions) ([]ImageSummary, error) {
- if s.ImagesFn == nil {
- return nil, ErrNotImplemented
- }
- return s.ImagesFn(ctx, project, options)
-}
diff --git a/pkg/bridge/convert.go b/pkg/bridge/convert.go
new file mode 100644
index 00000000000..c74dbbfacf6
--- /dev/null
+++ b/pkg/bridge/convert.go
@@ -0,0 +1,224 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bridge
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strconv"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/cli/cli/command"
+ cli "github.com/docker/cli/cli/command/container"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/go-connections/nat"
+ "gopkg.in/yaml.v3"
+)
+
+type ConvertOptions struct {
+ Output string
+ Templates string
+ Transformations []string
+}
+
+func Convert(ctx context.Context, dockerCli command.Cli, project *types.Project, opts ConvertOptions) error {
+ if len(opts.Transformations) == 0 {
+ opts.Transformations = []string{DefaultTransformerImage}
+ }
+ // Load image references, secrets and configs, also expose ports
+ project, err := LoadAdditionalResources(ctx, dockerCli, project)
+ if err != nil {
+ return err
+ }
+ // so users can rely on compose.yaml attribute names rather than Go struct field names, marshal the project back into YAML
+ raw, err := project.MarshalYAML(types.WithSecretContent)
+ if err != nil {
+ return fmt.Errorf("cannot render project into yaml: %w", err)
+ }
+ var model map[string]any
+ err = yaml.Unmarshal(raw, &model)
+ if err != nil {
+ return fmt.Errorf("cannot render project into yaml: %w", err)
+ }
+
+ if opts.Output != "" {
+ _ = os.RemoveAll(opts.Output)
+ err := os.MkdirAll(opts.Output, 0o744)
+ if err != nil && !os.IsExist(err) {
+ return fmt.Errorf("cannot create output folder: %w", err)
+ }
+ }
+ // Run Transformers images
+ return convert(ctx, dockerCli, model, opts)
+}
+
+func convert(ctx context.Context, dockerCli command.Cli, model map[string]any, opts ConvertOptions) error {
+ raw, err := yaml.Marshal(model)
+ if err != nil {
+ return err
+ }
+
+ dir := os.TempDir()
+ composeYaml := filepath.Join(dir, "compose.yaml")
+ err = os.WriteFile(composeYaml, raw, 0o600)
+ if err != nil {
+ return err
+ }
+
+ out, err := filepath.Abs(opts.Output)
+ if err != nil {
+ return err
+ }
+ binds := []string{
+ fmt.Sprintf("%s:%s", dir, "/in"),
+ fmt.Sprintf("%s:%s", out, "/out"),
+ }
+ if opts.Templates != "" {
+ templateDir, err := filepath.Abs(opts.Templates)
+ if err != nil {
+ return err
+ }
+ binds = append(binds, fmt.Sprintf("%s:%s", templateDir, "/templates"))
+ }
+
+ for _, transformation := range opts.Transformations {
+ _, err = inspectWithPull(ctx, dockerCli, transformation)
+ if err != nil {
+ return err
+ }
+
+ containerConfig := &container.Config{
+ Image: transformation,
+ Env: []string{"LICENSE_AGREEMENT=true"},
+ }
+ // On POSIX systems, this is a decimal number representing the uid.
+ // On Windows, this is a security identifier (SID) in a string format and the engine isn't able to manage it
+ if runtime.GOOS != "windows" {
+ usr, err := user.Current()
+ if err != nil {
+ return err
+ }
+ containerConfig.User = usr.Uid
+ }
+ created, err := dockerCli.Client().ContainerCreate(ctx, containerConfig, &container.HostConfig{
+ AutoRemove: true,
+ Binds: binds,
+ }, &network.NetworkingConfig{}, nil, "")
+ if err != nil {
+ return err
+ }
+
+ err = cli.RunStart(ctx, dockerCli, &cli.StartOptions{
+ Attach: true,
+ Containers: []string{created.ID},
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// LoadAdditionalResources loads additional resources from the project, such as image references, secrets, configs and exposed ports
+func LoadAdditionalResources(ctx context.Context, dockerCLI command.Cli, project *types.Project) (*types.Project, error) {
+ for name, service := range project.Services {
+ imageName := api.GetImageNameOrDefault(service, project.Name)
+
+ inspect, err := inspectWithPull(ctx, dockerCLI, imageName)
+ if err != nil {
+ return nil, err
+ }
+ service.Image = imageName
+ exposed := utils.Set[string]{}
+ exposed.AddAll(service.Expose...)
+ for port := range inspect.Config.ExposedPorts {
+ exposed.Add(nat.Port(port).Port())
+ }
+ for _, port := range service.Ports {
+ exposed.Add(strconv.Itoa(int(port.Target)))
+ }
+ service.Expose = exposed.Elements()
+ project.Services[name] = service
+ }
+
+ for name, secret := range project.Secrets {
+ f, err := loadFileObject(types.FileObjectConfig(secret))
+ if err != nil {
+ return nil, err
+ }
+ project.Secrets[name] = types.SecretConfig(f)
+ }
+
+ for name, config := range project.Configs {
+ f, err := loadFileObject(types.FileObjectConfig(config))
+ if err != nil {
+ return nil, err
+ }
+ project.Configs[name] = types.ConfigObjConfig(f)
+ }
+
+ return project, nil
+}
+
+func loadFileObject(conf types.FileObjectConfig) (types.FileObjectConfig, error) {
+ if !conf.External {
+ switch {
+ case conf.Environment != "":
+ conf.Content = os.Getenv(conf.Environment)
+ case conf.File != "":
+ bytes, err := os.ReadFile(conf.File)
+ if err != nil {
+ return conf, err
+ }
+ conf.Content = string(bytes)
+ }
+ }
+ return conf, nil
+}
+
+func inspectWithPull(ctx context.Context, dockerCli command.Cli, imageName string) (image.InspectResponse, error) {
+ inspect, err := dockerCli.Client().ImageInspect(ctx, imageName)
+ if errdefs.IsNotFound(err) {
+ var stream io.ReadCloser
+ stream, err = dockerCli.Client().ImagePull(ctx, imageName, image.PullOptions{})
+ if err != nil {
+ return image.InspectResponse{}, err
+ }
+ defer func() { _ = stream.Close() }()
+
+ err = jsonmessage.DisplayJSONMessagesToStream(stream, dockerCli.Out(), nil)
+ if err != nil {
+ return image.InspectResponse{}, err
+ }
+ if inspect, err = dockerCli.Client().ImageInspect(ctx, imageName); err != nil {
+ return image.InspectResponse{}, err
+ }
+ }
+ return inspect, err
+}
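
Illustrative sketch (not part of the patch) of the resolution order loadFileObject applies to non-external secrets and configs: an Environment reference wins, otherwise File content is read from disk. Since loadFileObject is unexported, the sketch is written as if it lived in package bridge; the secret name and variable are placeholders.

package bridge

import "github.com/compose-spec/compose-go/v2/types"

// resolveSecretExample resolves a non-external secret the same way Convert
// does before handing the model to transformer images.
func resolveSecretExample() (types.FileObjectConfig, error) {
	secret := types.FileObjectConfig{
		Name:        "db_password",
		Environment: "DB_PASSWORD", // if empty, loadFileObject falls back to reading File
	}
	return loadFileObject(secret)
}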
diff --git a/pkg/bridge/transformers.go b/pkg/bridge/transformers.go
new file mode 100644
index 00000000000..dbf4fc6d9dc
--- /dev/null
+++ b/pkg/bridge/transformers.go
@@ -0,0 +1,120 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package bridge
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/moby/go-archive"
+)
+
+const (
+ TransformerLabel = "com.docker.compose.bridge"
+ DefaultTransformerImage = "docker/compose-bridge-kubernetes"
+
+ templatesPath = "/templates"
+)
+
+type CreateTransformerOptions struct {
+ Dest string
+ From string
+}
+
+func CreateTransformer(ctx context.Context, dockerCli command.Cli, options CreateTransformerOptions) error {
+ if options.From == "" {
+ options.From = DefaultTransformerImage
+ }
+ out, err := filepath.Abs(options.Dest)
+ if err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(out); err == nil {
+ return fmt.Errorf("output folder %s already exists", out)
+ }
+
+ tmpl := filepath.Join(out, "templates")
+ err = os.MkdirAll(tmpl, 0o744)
+ if err != nil && !os.IsExist(err) {
+ return fmt.Errorf("cannot create output folder: %w", err)
+ }
+
+ if err := command.ValidateOutputPath(out); err != nil {
+ return err
+ }
+
+ created, err := dockerCli.Client().ContainerCreate(ctx, &container.Config{
+ Image: options.From,
+ }, &container.HostConfig{}, &network.NetworkingConfig{}, nil, "")
+ defer func() {
+ _ = dockerCli.Client().ContainerRemove(context.Background(), created.ID, container.RemoveOptions{Force: true})
+ }()
+
+ if err != nil {
+ return err
+ }
+ content, stat, err := dockerCli.Client().CopyFromContainer(ctx, created.ID, templatesPath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = content.Close()
+ }()
+
+ srcInfo := archive.CopyInfo{
+ Path: templatesPath,
+ Exists: true,
+ IsDir: stat.Mode.IsDir(),
+ }
+
+ preArchive := content
+ if srcInfo.RebaseName != "" {
+ _, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
+ preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
+ }
+
+ if err := archive.CopyTo(preArchive, srcInfo, out); err != nil {
+ return err
+ }
+
+ dockerfile := `FROM docker/compose-bridge-transformer
+LABEL com.docker.compose.bridge=transformation
+COPY templates /templates
+`
+ if err := os.WriteFile(filepath.Join(out, "Dockerfile"), []byte(dockerfile), 0o700); err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(dockerCli.Out(), "Transformer created in %q\n", out)
+ return err
+}
+
+func ListTransformers(ctx context.Context, dockerCli command.Cli) ([]image.Summary, error) {
+ api := dockerCli.Client()
+ return api.ImageList(ctx, image.ListOptions{
+ Filters: filters.NewArgs(
+ filters.Arg("label", fmt.Sprintf("%s=%s", TransformerLabel, "transformation")),
+ ),
+ })
+}
diff --git a/pkg/compose/apiSocket.go b/pkg/compose/apiSocket.go
new file mode 100644
index 00000000000..ddc7a029030
--- /dev/null
+++ b/pkg/compose/apiSocket.go
@@ -0,0 +1,92 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/config/configfile"
+)
+
+// --use-api-socket is not actually supported by the Docker Engine: it is a
+// client-side hack (see https://github.com/docker/cli/blob/master/cli/command/container/create.go#L246)
+// that we replicate here by transforming the project model.
+
+func (s *composeService) useAPISocket(project *types.Project) (*types.Project, error) {
+ useAPISocket := false
+ for _, service := range project.Services {
+ if service.UseAPISocket {
+ useAPISocket = true
+ break
+ }
+ }
+ if !useAPISocket {
+ return project, nil
+ }
+
+ if s.getContextInfo().ServerOSType() == "windows" {
+ return nil, errors.New("use_api_socket can't be used with a Windows Docker Engine")
+ }
+
+ creds, err := s.configFile().GetAllCredentials()
+ if err != nil {
+ return nil, fmt.Errorf("resolving credentials failed: %w", err)
+ }
+
+ newConfig := &configfile.ConfigFile{
+ AuthConfigs: creds,
+ }
+ var configBuf bytes.Buffer
+ if err := newConfig.SaveToWriter(&configBuf); err != nil {
+ return nil, fmt.Errorf("saving creds for API socket: %w", err)
+ }
+
+ project.Configs["#apisocket"] = types.ConfigObjConfig{
+ Content: configBuf.String(),
+ }
+
+ for name, service := range project.Services {
+ if !service.UseAPISocket {
+ continue
+ }
+ service.Volumes = append(service.Volumes, types.ServiceVolumeConfig{
+ Type: types.VolumeTypeBind,
+ Source: "/var/run/docker.sock",
+ Target: "/var/run/docker.sock",
+ })
+
+ _, envvarPresent := service.Environment["DOCKER_CONFIG"]
+
+ // If the DOCKER_CONFIG env var is already present, we assume the client knows
+ // what they're doing and don't inject the creds.
+ if !envvarPresent {
+ // Point DOCKER_CONFIG at the directory where the injected config is mounted.
+ path := "/run/secrets/docker"
+ service.Environment["DOCKER_CONFIG"] = &path
+ }
+
+ service.Configs = append(service.Configs, types.ServiceConfigObjConfig{
+ Source: "#apisocket",
+ Target: "/run/secrets/docker/config.json",
+ })
+ project.Services[name] = service
+ }
+ return project, nil
+}
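For context on the config object injected above, this is a small hedged sketch of how the in-memory config.json can be produced with the same configfile.SaveToWriter call; the registry entry and credentials are hypothetical, and the real code pulls them from the local Docker config via GetAllCredentials.

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/cli/cli/config/configfile"
	clitypes "github.com/docker/cli/cli/config/types"
)

func main() {
	cfg := &configfile.ConfigFile{
		AuthConfigs: map[string]clitypes.AuthConfig{
			// hypothetical registry entry, for illustration only
			"registry.example.com": {Username: "demo", Password: "secret"},
		},
	}
	var buf bytes.Buffer
	if err := cfg.SaveToWriter(&buf); err != nil {
		panic(err)
	}
	// This JSON is what the service sees at $DOCKER_CONFIG/config.json.
	fmt.Println(buf.String())
}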
diff --git a/pkg/compose/attach.go b/pkg/compose/attach.go
index 42b815f3f37..47b5888ff52 100644
--- a/pkg/compose/attach.go
+++ b/pkg/compose/attach.go
@@ -18,18 +18,19 @@ package compose
import (
"context"
+ "errors"
"fmt"
"io"
"strings"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/cli/cli/streams"
- moby "github.com/docker/docker/api/types"
+ containerType "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/term"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
func (s *composeService) attach(ctx context.Context, project *types.Project, listener api.ContainerEventListener, selectedServices []string) (Containers, error) {
@@ -37,18 +38,21 @@ func (s *composeService) attach(ctx context.Context, project *types.Project, lis
if err != nil {
return nil, err
}
+ if len(containers) == 0 {
+ return containers, nil
+ }
- containers.sorted() // This enforce predictable colors assignment
+ containers.sorted() // This enforces predictable color assignment
var names []string
for _, c := range containers {
names = append(names, getContainerNameWithoutProject(c))
}
- fmt.Printf("Attaching to %s\n", strings.Join(names, ", "))
+ _, _ = fmt.Fprintf(s.stdout(), "Attaching to %s\n", strings.Join(names, ", "))
- for _, container := range containers {
- err := s.attachContainer(ctx, container, listener)
+ for _, ctr := range containers {
+ err := s.attachContainer(ctx, ctr, listener)
if err != nil {
return nil, err
}
@@ -56,39 +60,44 @@ func (s *composeService) attach(ctx context.Context, project *types.Project, lis
return containers, err
}
-func (s *composeService) attachContainer(ctx context.Context, container moby.Container, listener api.ContainerEventListener) error {
- serviceName := container.Labels[api.ServiceLabel]
- containerName := getContainerNameWithoutProject(container)
+func (s *composeService) attachContainer(ctx context.Context, container containerType.Summary, listener api.ContainerEventListener) error {
+ service := container.Labels[api.ServiceLabel]
+ name := getContainerNameWithoutProject(container)
+ return s.doAttachContainer(ctx, service, container.ID, name, listener)
+}
- listener(api.ContainerEvent{
- Type: api.ContainerEventAttach,
- Container: containerName,
- Service: serviceName,
- })
+func (s *composeService) doAttachContainer(ctx context.Context, service, id, name string, listener api.ContainerEventListener) error {
+ inspect, err := s.apiClient().ContainerInspect(ctx, id)
+ if err != nil {
+ return err
+ }
- w := utils.GetWriter(func(line string) {
+ wOut := utils.GetWriter(func(line string) {
listener(api.ContainerEvent{
- Type: api.ContainerEventLog,
- Container: containerName,
- Service: serviceName,
- Line: line,
+ Type: api.ContainerEventLog,
+ Source: name,
+ ID: id,
+ Service: service,
+ Line: line,
+ })
+ })
+ wErr := utils.GetWriter(func(line string) {
+ listener(api.ContainerEvent{
+ Type: api.ContainerEventErr,
+ Source: name,
+ ID: id,
+ Service: service,
+ Line: line,
})
})
- inspect, err := s.dockerCli.Client().ContainerInspect(ctx, container.ID)
- if err != nil {
- return err
- }
-
- _, _, err = s.attachContainerStreams(ctx, container.ID, inspect.Config.Tty, nil, w, w)
+ _, _, err = s.attachContainerStreams(ctx, id, inspect.Config.Tty, nil, wOut, wErr)
return err
}
-func (s *composeService) attachContainerStreams(ctx context.Context, container string, tty bool, stdin io.ReadCloser, stdout, stderr io.Writer) (func(), chan bool, error) {
+func (s *composeService) attachContainerStreams(ctx context.Context, container string, tty bool, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (func(), chan bool, error) {
detached := make(chan bool)
- var (
- restore = func() { /* noop */ }
- )
+ restore := func() { /* noop */ }
if stdin != nil {
in := streams.NewIn(stdin)
if in.IsTerminal() {
@@ -112,13 +121,13 @@ func (s *composeService) attachContainerStreams(ctx context.Context, container s
if stdin != nil {
stdin.Close() //nolint:errcheck
}
- streamOut.Close() //nolint:errcheck
}()
if streamIn != nil && stdin != nil {
go func() {
_, err := io.Copy(streamIn, stdin)
- if _, ok := err.(term.EscapeError); ok {
+ var escapeErr term.EscapeError
+ if errors.As(err, &escapeErr) {
close(detached)
}
}()
@@ -126,10 +135,13 @@ func (s *composeService) attachContainerStreams(ctx context.Context, container s
if stdout != nil {
go func() {
+ defer stdout.Close() //nolint:errcheck
+ defer stderr.Close() //nolint:errcheck
+ defer streamOut.Close() //nolint:errcheck
if tty {
- io.Copy(stdout, streamOut) // nolint:errcheck
+ io.Copy(stdout, streamOut) //nolint:errcheck
} else {
- stdcopy.StdCopy(stdout, stderr, streamOut) // nolint:errcheck
+ stdcopy.StdCopy(stdout, stderr, streamOut) //nolint:errcheck
}
}()
}
@@ -139,12 +151,13 @@ func (s *composeService) attachContainerStreams(ctx context.Context, container s
func (s *composeService) getContainerStreams(ctx context.Context, container string) (io.WriteCloser, io.ReadCloser, error) {
var stdout io.ReadCloser
var stdin io.WriteCloser
- cnx, err := s.apiClient().ContainerAttach(ctx, container, moby.ContainerAttachOptions{
- Stream: true,
- Stdin: true,
- Stdout: true,
- Stderr: true,
- Logs: false,
+ cnx, err := s.apiClient().ContainerAttach(ctx, container, containerType.AttachOptions{
+ Stream: true,
+ Stdin: true,
+ Stdout: true,
+ Stderr: true,
+ Logs: false,
+ DetachKeys: s.configFile().DetachKeys,
})
if err == nil {
stdout = ContainerStdout{HijackedResponse: cnx}
@@ -153,7 +166,7 @@ func (s *composeService) getContainerStreams(ctx context.Context, container stri
}
// Fallback to logs API
- logs, err := s.apiClient().ContainerLogs(ctx, container, moby.ContainerLogsOptions{
+ logs, err := s.apiClient().ContainerLogs(ctx, container, containerType.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
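A short sketch of why the non-TTY branch of attachContainerStreams demultiplexes with stdcopy.StdCopy: the engine multiplexes stdout and stderr onto a single attach stream, and StdCopy splits them back into the two writers fed to the event listener. The payload strings below are made up for illustration.

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Build a multiplexed stream the way the daemon does for non-TTY containers.
	var mux bytes.Buffer
	_, _ = stdcopy.NewStdWriter(&mux, stdcopy.Stdout).Write([]byte("hello from stdout\n"))
	_, _ = stdcopy.NewStdWriter(&mux, stdcopy.Stderr).Write([]byte("oops on stderr\n"))

	// StdCopy splits the stream back into separate stdout/stderr writers.
	var out, errOut bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errOut, &mux); err != nil {
		panic(err)
	}
	fmt.Print("stdout: " + out.String())
	fmt.Print("stderr: " + errOut.String())
}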
diff --git a/pkg/compose/attach_service.go b/pkg/compose/attach_service.go
new file mode 100644
index 00000000000..7f41e773a42
--- /dev/null
+++ b/pkg/compose/attach_service.go
@@ -0,0 +1,44 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strings"
+
+ "github.com/docker/cli/cli/command/container"
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+func (s *composeService) Attach(ctx context.Context, projectName string, options api.AttachOptions) error {
+ projectName = strings.ToLower(projectName)
+ target, err := s.getSpecifiedContainer(ctx, projectName, oneOffInclude, false, options.Service, options.Index)
+ if err != nil {
+ return err
+ }
+
+ detachKeys := options.DetachKeys
+ if detachKeys == "" {
+ detachKeys = s.configFile().DetachKeys
+ }
+
+ var attach container.AttachOptions
+ attach.DetachKeys = detachKeys
+ attach.NoStdin = options.NoStdin
+ attach.Proxy = options.Proxy
+ return container.RunAttach(ctx, s.dockerCli, target.ID, &attach)
+}
diff --git a/pkg/compose/build.go b/pkg/compose/build.go
index 3e080d9eae8..361ae5323a3 100644
--- a/pkg/compose/build.go
+++ b/pkg/compose/build.go
@@ -19,86 +19,89 @@ package compose
import (
"context"
"fmt"
- "path/filepath"
-
- "github.com/compose-spec/compose-go/types"
- "github.com/containerd/containerd/platforms"
- "github.com/docker/buildx/build"
- _ "github.com/docker/buildx/driver/docker" // required to get default driver registered
- "github.com/docker/buildx/util/buildflags"
- xprogress "github.com/docker/buildx/util/progress"
- "github.com/docker/docker/pkg/urlutil"
- bclient "github.com/moby/buildkit/client"
- "github.com/moby/buildkit/session"
- "github.com/moby/buildkit/session/auth/authprovider"
- "github.com/moby/buildkit/session/sshforward/sshprovider"
+ "strings"
+ "time"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/platforms"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
specs "github.com/opencontainers/image-spec/specs-go/v1"
-
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/sirupsen/logrus"
)
func (s *composeService) Build(ctx context.Context, project *types.Project, options api.BuildOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.build(ctx, project, options)
- })
+ err := options.Apply(project)
+ if err != nil {
+ return err
+ }
+ return Run(ctx, func(ctx context.Context) error {
+ return tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(ctx, project),
+ func(ctx context.Context) error {
+ _, err := s.build(ctx, project, options, nil)
+ return err
+ })(ctx)
+ }, "build", s.events)
}
-func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions) error {
- opts := map[string]build.Options{}
- imagesToBuild := []string{}
+func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions, localImages map[string]api.ImageSummary) (map[string]string, error) {
+ imageIDs := map[string]string{}
+ serviceToBuild := types.Services{}
- args := flatten(options.Args.Resolve(func(s string) (string, bool) {
- s, ok := project.Environment[s]
- return s, ok
- }))
+ var policy types.DependencyOption = types.IgnoreDependencies
+ if options.Deps {
+ policy = types.IncludeDependencies
+ }
- services, err := project.GetServices(options.Services...)
- if err != nil {
- return err
+ if len(options.Services) == 0 {
+ options.Services = project.ServiceNames()
}
- for _, service := range services {
- if service.Build != nil {
- imageName := getImageName(service, project.Name)
- imagesToBuild = append(imagesToBuild, imageName)
- buildOptions, err := s.toBuildOptions(project, service, imageName, options.SSHs)
- if err != nil {
- return err
- }
- buildOptions.Pull = options.Pull
- buildOptions.BuildArgs = mergeArgs(buildOptions.BuildArgs, args)
- buildOptions.NoCache = options.NoCache
- buildOptions.CacheFrom, err = buildflags.ParseCacheEntry(service.Build.CacheFrom)
- if err != nil {
- return err
- }
+ // also include services referenced via additional_contexts with the service: prefix
+ options.Services = addBuildDependencies(options.Services, project)
+ // Some build dependencies we just introduced may not be enabled
+ var err error
+ project, err = project.WithServicesEnabled(options.Services...)
+ if err != nil {
+ return nil, err
+ }
- for _, image := range service.Build.CacheFrom {
- buildOptions.CacheFrom = append(buildOptions.CacheFrom, bclient.CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{"ref": image},
- })
- }
- opts[imageName] = buildOptions
- }
+ project, err = project.WithSelectedServices(options.Services)
+ if err != nil {
+ return nil, err
}
- _, err = s.doBuild(ctx, project, opts, options.Progress)
- if err == nil {
- if len(imagesToBuild) > 0 && !options.Quiet {
- utils.DisplayScanSuggestMsg()
+ err = project.ForEachService(options.Services, func(serviceName string, service *types.ServiceConfig) error {
+ if service.Build == nil {
+ return nil
}
+ image := api.GetImageNameOrDefault(*service, project.Name)
+ _, localImagePresent := localImages[image]
+ if localImagePresent && service.PullPolicy != types.PullPolicyBuild {
+ return nil
+ }
+ serviceToBuild[serviceName] = *service
+ return nil
+ }, policy)
+ if err != nil || len(serviceToBuild) == 0 {
+ return imageIDs, err
}
- return err
+ bake, err := buildWithBake(s.dockerCli)
+ if err != nil {
+ return nil, err
+ }
+ if bake {
+ return s.doBuildBake(ctx, project, serviceToBuild, options)
+ }
+ return s.doBuildClassic(ctx, project, serviceToBuild, options)
}
-func (s *composeService) ensureImagesExists(ctx context.Context, project *types.Project, quietPull bool) error {
- for _, service := range project.Services {
- if service.Image == "" && service.Build == nil {
- return fmt.Errorf("invalid service %q. Must specify either image or build", service.Name)
+func (s *composeService) ensureImagesExists(ctx context.Context, project *types.Project, buildOpts *api.BuildOptions, quietPull bool) error {
+ for name, service := range project.Services {
+ if service.Provider == nil && service.Image == "" && service.Build == nil {
+ return fmt.Errorf("invalid service %q. Must specify either image or build", name)
}
}
@@ -107,212 +110,159 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
return err
}
- err = s.pullRequiredImages(ctx, project, images, quietPull)
+ err = tracing.SpanWrapFunc("project/pull", tracing.ProjectOptions(ctx, project),
+ func(ctx context.Context) error {
+ return s.pullRequiredImages(ctx, project, images, quietPull)
+ },
+ )(ctx)
if err != nil {
return err
}
- mode := xprogress.PrinterModeAuto
- if quietPull {
- mode = xprogress.PrinterModeQuiet
- }
- opts, err := s.getBuildOptions(project, images)
- if err != nil {
- return err
- }
- builtImages, err := s.doBuild(ctx, project, opts, mode)
- if err != nil {
- return err
+ if buildOpts != nil {
+ err = tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(ctx, project),
+ func(ctx context.Context) error {
+ builtImages, err := s.build(ctx, project, *buildOpts, images)
+ if err != nil {
+ return err
+ }
+
+ for name, digest := range builtImages {
+ images[name] = api.ImageSummary{
+ Repository: name,
+ ID: digest,
+ LastTagTime: time.Now(),
+ }
+ }
+ return nil
+ },
+ )(ctx)
+ if err != nil {
+ return err
+ }
}
- if len(builtImages) > 0 {
- utils.DisplayScanSuggestMsg()
- }
- for name, digest := range builtImages {
- images[name] = digest
- }
// set digest as com.docker.compose.image label so we can detect outdated containers
- for i, service := range project.Services {
- image := getImageName(service, project.Name)
- digest, ok := images[image]
+ for name, service := range project.Services {
+ image := api.GetImageNameOrDefault(service, project.Name)
+ img, ok := images[image]
if ok {
- if project.Services[i].Labels == nil {
- project.Services[i].Labels = types.Labels{}
- }
- project.Services[i].CustomLabels[api.ImageDigestLabel] = digest
- project.Services[i].Image = image
+ service.CustomLabels.Add(api.ImageDigestLabel, img.ID)
}
+ project.Services[name] = service
}
return nil
}
-func (s *composeService) getBuildOptions(project *types.Project, images map[string]string) (map[string]build.Options, error) {
- opts := map[string]build.Options{}
- for _, service := range project.Services {
- if service.Image == "" && service.Build == nil {
- return nil, fmt.Errorf("invalid service %q. Must specify either image or build", service.Name)
- }
- imageName := getImageName(service, project.Name)
- _, localImagePresent := images[imageName]
-
- if service.Build != nil {
- if localImagePresent && service.PullPolicy != types.PullPolicyBuild {
- continue
- }
- opt, err := s.toBuildOptions(project, service, imageName, []types.SSHKey{})
- if err != nil {
- return nil, err
- }
- opts[imageName] = opt
- continue
- }
- }
- return opts, nil
-
-}
-
-func (s *composeService) getLocalImagesDigests(ctx context.Context, project *types.Project) (map[string]string, error) {
- imageNames := []string{}
+func (s *composeService) getLocalImagesDigests(ctx context.Context, project *types.Project) (map[string]api.ImageSummary, error) {
+ imageNames := utils.Set[string]{}
for _, s := range project.Services {
- imgName := getImageName(s, project.Name)
- if !utils.StringContains(imageNames, imgName) {
- imageNames = append(imageNames, imgName)
+ imageNames.Add(api.GetImageNameOrDefault(s, project.Name))
+ for _, volume := range s.Volumes {
+ if volume.Type == types.VolumeTypeImage {
+ imageNames.Add(volume.Source)
+ }
}
}
- imgs, err := s.getImages(ctx, imageNames)
+ imgs, err := s.getImageSummaries(ctx, imageNames.Elements())
if err != nil {
return nil, err
}
- images := map[string]string{}
- for name, info := range imgs {
- images[name] = info.ID
- }
- for _, s := range project.Services {
- imgName := getImageName(s, project.Name)
- digest, ok := images[imgName]
- if ok {
- s.CustomLabels[api.ImageDigestLabel] = digest
- }
- }
-
- return images, nil
-}
-
-func (s *composeService) doBuild(ctx context.Context, project *types.Project, opts map[string]build.Options, mode string) (map[string]string, error) {
- if len(opts) == 0 {
- return nil, nil
- }
- if buildkitEnabled, err := s.dockerCli.BuildKitEnabled(); err != nil || !buildkitEnabled {
- return s.doBuildClassic(ctx, opts)
- }
- return s.doBuildBuildkit(ctx, project, opts, mode)
-}
-
-func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, imageTag string, sshKeys []types.SSHKey) (build.Options, error) {
- var tags []string
- tags = append(tags, imageTag)
-
- buildArgs := flatten(service.Build.Args.Resolve(func(s string) (string, bool) {
- s, ok := project.Environment[s]
- return s, ok
- }))
-
- var plats []specs.Platform
- if platform, ok := project.Environment["DOCKER_DEFAULT_PLATFORM"]; ok {
- p, err := platforms.Parse(platform)
- if err != nil {
- return build.Options{}, err
+ for i, service := range project.Services {
+ imgName := api.GetImageNameOrDefault(service, project.Name)
+ img, ok := imgs[imgName]
+ if !ok {
+ continue
}
- plats = append(plats, p)
- }
- if service.Platform != "" {
- p, err := platforms.Parse(service.Platform)
- if err != nil {
- return build.Options{}, err
+ if service.Platform != "" {
+ platform, err := platforms.Parse(service.Platform)
+ if err != nil {
+ return nil, err
+ }
+ inspect, err := s.apiClient().ImageInspect(ctx, img.ID)
+ if err != nil {
+ return nil, err
+ }
+ actual := specs.Platform{
+ Architecture: inspect.Architecture,
+ OS: inspect.Os,
+ Variant: inspect.Variant,
+ }
+ if !platforms.NewMatcher(platform).Match(actual) {
+ logrus.Debugf("local image %s doesn't match expected platform %s", service.Image, service.Platform)
+ // there is a local image, but it's for the wrong platform, so
+ // pretend it doesn't exist so that we can pull/build an image
+ // for the correct platform instead
+ delete(imgs, imgName)
+ }
}
- plats = append(plats, p)
- }
- cacheFrom, err := buildflags.ParseCacheEntry(service.Build.CacheFrom)
- if err != nil {
- return build.Options{}, err
- }
- cacheTo, err := buildflags.ParseCacheEntry(service.Build.CacheTo)
- if err != nil {
- return build.Options{}, err
- }
+ project.Services[i].CustomLabels.Add(api.ImageDigestLabel, img.ID)
- sessionConfig := []session.Attachable{
- authprovider.NewDockerAuthProvider(s.stderr()),
- }
- if len(sshKeys) > 0 || len(service.Build.SSH) > 0 {
- sshAgentProvider, err := sshAgentProvider(append(service.Build.SSH, sshKeys...))
- if err != nil {
- return build.Options{}, err
- }
- sessionConfig = append(sessionConfig, sshAgentProvider)
}
- return build.Options{
- Inputs: build.Inputs{
- ContextPath: service.Build.Context,
- DockerfilePath: dockerFilePath(service.Build.Context, service.Build.Dockerfile),
- },
- CacheFrom: cacheFrom,
- CacheTo: cacheTo,
- NoCache: service.Build.NoCache,
- Pull: service.Build.Pull,
- BuildArgs: buildArgs,
- Tags: tags,
- Target: service.Build.Target,
- Exports: []bclient.ExportEntry{{Type: "image", Attrs: map[string]string{}}},
- Platforms: plats,
- Labels: service.Build.Labels,
- NetworkMode: service.Build.Network,
- ExtraHosts: service.Build.ExtraHosts,
- Session: sessionConfig,
- }, nil
+ return imgs, nil
}
-func flatten(in types.MappingWithEquals) types.Mapping {
- if len(in) == 0 {
- return nil
- }
- out := types.Mapping{}
- for k, v := range in {
- if v == nil {
- continue
+// resolveAndMergeBuildArgs returns the final set of build arguments to use for the service image build.
+//
+// First, args directly defined via `build.args` in YAML are considered.
+// Then, any explicitly passed args in opts (e.g. via `--build-arg` on the CLI) are merged, overwriting any
+// keys that already exist.
+// Next, any keys without a value are resolved using the project environment.
+//
+// Finally, standard proxy variables based on the Docker client configuration are added, but will not overwrite
+// any values if already present.
+func resolveAndMergeBuildArgs(proxyConfig map[string]string, project *types.Project, service types.ServiceConfig, opts api.BuildOptions) types.MappingWithEquals {
+ result := make(types.MappingWithEquals).
+ OverrideBy(service.Build.Args).
+ OverrideBy(opts.Args).
+ Resolve(envResolver(project.Environment))
+
+ // proxy arguments do NOT override and should NOT have env resolution applied,
+ // so they're handled last
+ for k, v := range proxyConfig {
+ if _, ok := result[k]; !ok {
+ v := v
+ result[k] = &v
}
- out[k] = *v
}
- return out
+ return result
}
-func mergeArgs(m ...types.Mapping) types.Mapping {
- merged := types.Mapping{}
- for _, mapping := range m {
- for key, val := range mapping {
- merged[key] = val
+func getImageBuildLabels(project *types.Project, service types.ServiceConfig) types.Labels {
+ ret := make(types.Labels)
+ if service.Build != nil {
+ for k, v := range service.Build.Labels {
+ ret.Add(k, v)
}
}
- return merged
-}
-func dockerFilePath(context string, dockerfile string) string {
- if urlutil.IsGitURL(context) || filepath.IsAbs(dockerfile) {
- return dockerfile
- }
- return filepath.Join(context, dockerfile)
+ ret.Add(api.VersionLabel, api.ComposeVersion)
+ ret.Add(api.ProjectLabel, project.Name)
+ ret.Add(api.ServiceLabel, service.Name)
+ return ret
}
-func sshAgentProvider(sshKeys types.SSHConfig) (session.Attachable, error) {
- sshConfig := make([]sshprovider.AgentConfig, 0, len(sshKeys))
- for _, sshKey := range sshKeys {
- sshConfig = append(sshConfig, sshprovider.AgentConfig{
- ID: sshKey.ID,
- Paths: []string{sshKey.Path},
- })
+func addBuildDependencies(services []string, project *types.Project) []string {
+ servicesWithDependencies := utils.NewSet(services...)
+ for _, service := range services {
+ s, ok := project.Services[service]
+ if !ok {
+ s = project.DisabledServices[service]
+ }
+ b := s.Build
+ if b != nil {
+ for _, target := range b.AdditionalContexts {
+ if s, found := strings.CutPrefix(target, types.ServicePrefix); found {
+ servicesWithDependencies.Add(s)
+ }
+ }
+ }
+ }
+ if len(servicesWithDependencies) > len(services) {
+ return addBuildDependencies(servicesWithDependencies.Elements(), project)
}
- return sshprovider.NewSSHAgentProvider(sshConfig)
+ return servicesWithDependencies.Elements()
}
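To illustrate the precedence described in the resolveAndMergeBuildArgs doc comment, here is a minimal sketch using the same compose-go MappingWithEquals helpers; the VERSION/TOKEN arguments and the inline environment lookup are hypothetical, and the real code resolves against the project environment via envResolver.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/types"
)

func ptr(s string) *string { return &s }

func main() {
	// Args declared under build.args in the compose file; TOKEN is left without a value.
	yamlArgs := types.MappingWithEquals{"VERSION": ptr("1.0"), "TOKEN": nil}
	// Args passed explicitly, e.g. via `--build-arg VERSION=2.0`.
	cliArgs := types.MappingWithEquals{"VERSION": ptr("2.0")}
	// Project environment used to resolve value-less keys.
	env := types.Mapping{"TOKEN": "from-env"}

	result := make(types.MappingWithEquals).
		OverrideBy(yamlArgs).
		OverrideBy(cliArgs).
		Resolve(func(key string) (string, bool) { v, ok := env[key]; return v, ok })

	fmt.Println(result.ToMapping()) // map[TOKEN:from-env VERSION:2.0]
}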
diff --git a/pkg/compose/build_bake.go b/pkg/compose/build_bake.go
new file mode 100644
index 00000000000..104be5c0f2d
--- /dev/null
+++ b/pkg/compose/build_bake.go
@@ -0,0 +1,572 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/sha1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/console"
+ "github.com/containerd/errdefs"
+ "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/command/image/build"
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/google/uuid"
+ "github.com/moby/buildkit/client"
+ gitutil "github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
+ "github.com/moby/buildkit/util/progress/progressui"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
+)
+
+func buildWithBake(dockerCli command.Cli) (bool, error) {
+ enabled, err := dockerCli.BuildKitEnabled()
+ if err != nil {
+ return false, err
+ }
+ if !enabled {
+ return false, nil
+ }
+
+ _, err = manager.GetPlugin("buildx", dockerCli, &cobra.Command{})
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ logrus.Warnf("Docker Compose requires the buildx plugin to be installed")
+ return false, nil
+ }
+ return false, err
+ }
+ return true, err
+}
+
+// We _could_ use bake.* types from github.com/docker/buildx, but the long-term plan is to remove buildx as a dependency
+type bakeConfig struct {
+ Groups map[string]bakeGroup `json:"group"`
+ Targets map[string]bakeTarget `json:"target"`
+}
+
+type bakeGroup struct {
+ Targets []string `json:"targets"`
+}
+
+type bakeTarget struct {
+ Context string `json:"context,omitempty"`
+ Contexts map[string]string `json:"contexts,omitempty"`
+ Dockerfile string `json:"dockerfile,omitempty"`
+ DockerfileInline string `json:"dockerfile-inline,omitempty"`
+ Args map[string]string `json:"args,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ CacheFrom []string `json:"cache-from,omitempty"`
+ CacheTo []string `json:"cache-to,omitempty"`
+ Target string `json:"target,omitempty"`
+ Secrets []string `json:"secret,omitempty"`
+ SSH []string `json:"ssh,omitempty"`
+ Platforms []string `json:"platforms,omitempty"`
+ Pull bool `json:"pull,omitempty"`
+ NoCache bool `json:"no-cache,omitempty"`
+ NetworkMode string `json:"network,omitempty"`
+ NoCacheFilter []string `json:"no-cache-filter,omitempty"`
+ ShmSize types.UnitBytes `json:"shm-size,omitempty"`
+ Ulimits []string `json:"ulimits,omitempty"`
+ Call string `json:"call,omitempty"`
+ Entitlements []string `json:"entitlements,omitempty"`
+ ExtraHosts map[string]string `json:"extra-hosts,omitempty"`
+ Outputs []string `json:"output,omitempty"`
+ Attest []string `json:"attest,omitempty"`
+}
+
+type bakeMetadata map[string]buildStatus
+
+type buildStatus struct {
+ Digest string `json:"containerimage.digest"`
+ Image string `json:"image.name"`
+}
+
+func (s *composeService) doBuildBake(ctx context.Context, project *types.Project, serviceToBeBuild types.Services, options api.BuildOptions) (map[string]string, error) { //nolint:gocyclo
+ eg := errgroup.Group{}
+ ch := make(chan *client.SolveStatus)
+ displayMode := progressui.DisplayMode(options.Progress)
+ if displayMode == progressui.AutoMode {
+ options.Progress = os.Getenv("BUILDKIT_PROGRESS")
+ }
+ out := options.Out
+ if out == nil {
+ out = s.stdout()
+ }
+ display, err := progressui.NewDisplay(makeConsole(out), displayMode)
+ if err != nil {
+ return nil, err
+ }
+ eg.Go(func() error {
+ _, err := display.UpdateFrom(ctx, ch)
+ return err
+ })
+
+ cfg := bakeConfig{
+ Groups: map[string]bakeGroup{},
+ Targets: map[string]bakeTarget{},
+ }
+ var (
+ group bakeGroup
+ privileged bool
+ read []string
+ expectedImages = make(map[string]string, len(serviceToBeBuild)) // service name -> expected image
+ targets = make(map[string]string, len(serviceToBeBuild)) // service name -> build target
+ )
+
+ // produce a unique ID for each service, used as its bake target name
+ for serviceName := range project.Services {
+ t := strings.ReplaceAll(serviceName, ".", "_")
+ for {
+ if _, ok := targets[serviceName]; !ok {
+ targets[serviceName] = t
+ break
+ }
+ t += "_"
+ }
+ }
+
+ var secretsEnv []string
+ for serviceName, service := range project.Services {
+ if service.Build == nil {
+ continue
+ }
+ buildConfig := *service.Build
+ labels := getImageBuildLabels(project, service)
+
+ args := resolveAndMergeBuildArgs(s.getProxyConfig(), project, service, options).ToMapping()
+ for k, v := range args {
+ args[k] = strings.ReplaceAll(v, "${", "$${")
+ }
+
+ entitlements := buildConfig.Entitlements
+ if slices.Contains(buildConfig.Entitlements, "security.insecure") {
+ privileged = true
+ }
+ if buildConfig.Privileged {
+ entitlements = append(entitlements, "security.insecure")
+ privileged = true
+ }
+
+ var outputs []string
+ var call string
+ push := options.Push && service.Image != ""
+ switch {
+ case options.Check:
+ call = "lint"
+ case len(service.Build.Platforms) > 1:
+ outputs = []string{fmt.Sprintf("type=image,push=%t", push)}
+ default:
+ if push {
+ outputs = []string{"type=registry"}
+ } else {
+ outputs = []string{"type=docker"}
+ }
+ }
+
+ read = append(read, buildConfig.Context)
+ for _, path := range buildConfig.AdditionalContexts {
+ _, _, err := gitutil.ParseGitRef(path)
+ if !strings.Contains(path, "://") && err != nil {
+ read = append(read, path)
+ }
+ }
+
+ image := api.GetImageNameOrDefault(service, project.Name)
+ s.events.On(buildingEvent(image))
+
+ expectedImages[serviceName] = image
+
+ pull := service.Build.Pull || options.Pull
+ noCache := service.Build.NoCache || options.NoCache
+
+ target := targets[serviceName]
+
+ secrets, env := toBakeSecrets(project, buildConfig.Secrets)
+ secretsEnv = append(secretsEnv, env...)
+
+ cfg.Targets[target] = bakeTarget{
+ Context: buildConfig.Context,
+ Contexts: additionalContexts(buildConfig.AdditionalContexts, targets),
+ Dockerfile: dockerFilePath(buildConfig.Context, buildConfig.Dockerfile),
+ DockerfileInline: strings.ReplaceAll(buildConfig.DockerfileInline, "${", "$${"),
+ Args: args,
+ Labels: labels,
+ Tags: append(buildConfig.Tags, image),
+
+ CacheFrom: buildConfig.CacheFrom,
+ CacheTo: buildConfig.CacheTo,
+ NetworkMode: buildConfig.Network,
+ Platforms: buildConfig.Platforms,
+ Target: buildConfig.Target,
+ Secrets: secrets,
+ SSH: toBakeSSH(append(buildConfig.SSH, options.SSHs...)),
+ Pull: pull,
+ NoCache: noCache,
+ ShmSize: buildConfig.ShmSize,
+ Ulimits: toBakeUlimits(buildConfig.Ulimits),
+ Entitlements: entitlements,
+ ExtraHosts: toBakeExtraHosts(buildConfig.ExtraHosts),
+
+ Outputs: outputs,
+ Call: call,
+ Attest: toBakeAttest(buildConfig),
+ }
+ }
+
+ // create a bake group with targets for services to build
+ for serviceName, service := range serviceToBeBuild {
+ if service.Build == nil {
+ continue
+ }
+ group.Targets = append(group.Targets, targets[serviceName])
+ }
+
+ cfg.Groups["default"] = group
+
+ b, err := json.MarshalIndent(cfg, "", " ")
+ if err != nil {
+ return nil, err
+ }
+
+ if options.Print {
+ _, err = fmt.Fprintln(s.stdout(), string(b))
+ return nil, err
+ }
+ logrus.Debugf("bake build config:\n%s", string(b))
+
+ tmpdir := os.TempDir()
+ var metadataFile string
+ for {
+ // we don't use os.CreateTemp here as we only need a temporary file name and don't want the file created:
+ // bake relies on an atomic writer, and a pre-existing file would conflict with its rename
+ metadataFile = filepath.Join(tmpdir, fmt.Sprintf("compose-build-metadataFile-%s.json", uuid.New().String()))
+ if _, err = os.Stat(metadataFile); err != nil {
+ if os.IsNotExist(err) {
+ break
+ }
+ var pathError *fs.PathError
+ if errors.As(err, &pathError) {
+ return nil, fmt.Errorf("can't access os.tempDir %s: %w", tmpdir, pathError.Err)
+ }
+ }
+ }
+ defer func() {
+ _ = os.Remove(metadataFile)
+ }()
+
+ buildx, err := manager.GetPlugin("buildx", s.dockerCli, &cobra.Command{})
+ if err != nil {
+ return nil, err
+ }
+
+ if versions.LessThan(buildx.Version[1:], "0.17.0") {
+ return nil, fmt.Errorf("compose build requires buildx 0.17 or later")
+ }
+
+ args := []string{"bake", "--file", "-", "--progress", "rawjson", "--metadata-file", metadataFile}
+ // FIXME we should prompt the user about this, but doing so would be a breaking UX change
+ for _, path := range read {
+ args = append(args, "--allow", "fs.read="+path)
+ }
+ if privileged {
+ args = append(args, "--allow", "security.insecure")
+ }
+ if options.SBOM != "" {
+ args = append(args, "--sbom="+options.SBOM)
+ }
+ if options.Provenance != "" {
+ args = append(args, "--provenance="+options.Provenance)
+ }
+
+ if options.Builder != "" {
+ args = append(args, "--builder", options.Builder)
+ }
+ if options.Quiet {
+ args = append(args, "--progress=quiet")
+ }
+
+ logrus.Debugf("Executing bake with args: %v", args)
+
+ if s.dryRun {
+ return s.dryRunBake(cfg), nil
+ }
+ cmd := exec.CommandContext(ctx, buildx.Path, args...)
+
+ err = s.prepareShellOut(ctx, types.NewMapping(os.Environ()), cmd)
+ if err != nil {
+ return nil, err
+ }
+ endpoint, cleanup, err := s.propagateDockerEndpoint()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Env = append(cmd.Env, endpoint...)
+ cmd.Env = append(cmd.Env, secretsEnv...)
+ defer cleanup()
+
+ cmd.Stdout = s.stdout()
+ cmd.Stdin = bytes.NewBuffer(b)
+ pipe, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ var errMessage []string
+ reader := bufio.NewReader(pipe)
+
+ err = cmd.Start()
+ if err != nil {
+ return nil, err
+ }
+ eg.Go(cmd.Wait)
+ for {
+ line, readErr := reader.ReadString('\n')
+ if readErr != nil {
+ if readErr == io.EOF {
+ break
+ }
+ if errors.Is(readErr, os.ErrClosed) {
+ logrus.Debugf("bake stopped")
+ break
+ }
+ return nil, fmt.Errorf("failed to execute bake: %w", readErr)
+ }
+ decoder := json.NewDecoder(strings.NewReader(line))
+ var status client.SolveStatus
+ err := decoder.Decode(&status)
+ if err != nil {
+ if strings.HasPrefix(line, "ERROR: ") {
+ errMessage = append(errMessage, line[7:])
+ } else {
+ errMessage = append(errMessage, line)
+ }
+ continue
+ }
+ ch <- &status
+ }
+ close(ch) // stop build progress UI
+
+ err = eg.Wait()
+ if err != nil {
+ if len(errMessage) > 0 {
+ return nil, errors.New(strings.Join(errMessage, "\n"))
+ }
+ return nil, fmt.Errorf("failed to execute bake: %w", err)
+ }
+
+ b, err = os.ReadFile(metadataFile)
+ if err != nil {
+ return nil, err
+ }
+
+ var md bakeMetadata
+ err = json.Unmarshal(b, &md)
+ if err != nil {
+ return nil, err
+ }
+
+ results := map[string]string{}
+ for name := range serviceToBeBuild {
+ image := expectedImages[name]
+ target := targets[name]
+ built, ok := md[target]
+ if !ok {
+ return nil, fmt.Errorf("build result not found in Bake metadata for service %s", name)
+ }
+ results[image] = built.Digest
+ s.events.On(builtEvent(image))
+ }
+ return results, nil
+}
+
+// makeConsole wraps the provided writer to match the [containerd.File] interface if it is of type *streams.Out.
+// buildkit's NewDisplay doesn't actually require an [io.Reader]; it only uses the [containerd.Console] type to
+// benefit from ANSI capabilities, and only performs writes.
+func makeConsole(out io.Writer) io.Writer {
+ if s, ok := out.(*streams.Out); ok {
+ return &_console{s}
+ }
+ return out
+}
+
+var _ console.File = &_console{}
+
+type _console struct {
+ *streams.Out
+}
+
+func (c _console) Read(p []byte) (n int, err error) {
+ return 0, errors.New("not implemented")
+}
+
+func (c _console) Close() error {
+ return nil
+}
+
+func (c _console) Fd() uintptr {
+ return c.FD()
+}
+
+func (c _console) Name() string {
+ return "compose"
+}
+
+func toBakeExtraHosts(hosts types.HostsList) map[string]string {
+ m := make(map[string]string)
+ for k, v := range hosts {
+ m[k] = strings.Join(v, ",")
+ }
+ return m
+}
+
+func additionalContexts(contexts types.Mapping, targets map[string]string) map[string]string {
+ ac := map[string]string{}
+ for k, v := range contexts {
+ if target, found := strings.CutPrefix(v, types.ServicePrefix); found {
+ v = "target:" + targets[target]
+ }
+ ac[k] = v
+ }
+ return ac
+}
+
+func toBakeUlimits(ulimits map[string]*types.UlimitsConfig) []string {
+ s := []string{}
+ for u, l := range ulimits {
+ if l.Single > 0 {
+ s = append(s, fmt.Sprintf("%s=%d", u, l.Single))
+ } else {
+ s = append(s, fmt.Sprintf("%s=%d:%d", u, l.Soft, l.Hard))
+ }
+ }
+ return s
+}
+
+func toBakeSSH(ssh types.SSHConfig) []string {
+ var s []string
+ for _, key := range ssh {
+ s = append(s, fmt.Sprintf("%s=%s", key.ID, key.Path))
+ }
+ return s
+}
+
+func toBakeSecrets(project *types.Project, secrets []types.ServiceSecretConfig) ([]string, []string) {
+ var s []string
+ var env []string
+ for _, ref := range secrets {
+ def := project.Secrets[ref.Source]
+ target := ref.Target
+ if target == "" {
+ target = ref.Source
+ }
+ switch {
+ case def.Environment != "":
+ env = append(env, fmt.Sprintf("%s=%s", def.Environment, project.Environment[def.Environment]))
+ s = append(s, fmt.Sprintf("id=%s,type=env,env=%s", target, def.Environment))
+ case def.File != "":
+ s = append(s, fmt.Sprintf("id=%s,type=file,src=%s", target, def.File))
+ }
+ }
+ return s, env
+}
+
+func toBakeAttest(buildConfig types.BuildConfig) []string {
+ var attests []string
+
+ // Handle per-service provenance configuration (only from build config, not global options)
+ if buildConfig.Provenance != "" {
+ if buildConfig.Provenance == "true" {
+ attests = append(attests, "type=provenance")
+ } else if buildConfig.Provenance != "false" {
+ attests = append(attests, fmt.Sprintf("type=provenance,%s", buildConfig.Provenance))
+ }
+ }
+
+ // Handle per-service SBOM configuration (only from build config, not global options)
+ if buildConfig.SBOM != "" {
+ if buildConfig.SBOM == "true" {
+ attests = append(attests, "type=sbom")
+ } else if buildConfig.SBOM != "false" {
+ attests = append(attests, fmt.Sprintf("type=sbom,%s", buildConfig.SBOM))
+ }
+ }
+
+ return attests
+}
+
+func dockerFilePath(ctxName string, dockerfile string) string {
+ if dockerfile == "" {
+ return ""
+ }
+ if contextType, _ := build.DetectContextType(ctxName); contextType == build.ContextTypeGit {
+ return dockerfile
+ }
+ if !filepath.IsAbs(dockerfile) {
+ dockerfile = filepath.Join(ctxName, dockerfile)
+ }
+ dir := filepath.Dir(dockerfile)
+ symlinks, err := filepath.EvalSymlinks(dir)
+ if err == nil {
+ return filepath.Join(symlinks, filepath.Base(dockerfile))
+ }
+ return dockerfile
+}
+
+func (s composeService) dryRunBake(cfg bakeConfig) map[string]string {
+ bakeResponse := map[string]string{}
+ for name, target := range cfg.Targets {
+ dryRunUUID := fmt.Sprintf("dryRun-%x", sha1.Sum([]byte(name)))
+ s.displayDryRunBuildEvent(name, dryRunUUID, target.Tags[0])
+ bakeResponse[name] = dryRunUUID
+ }
+ for name := range bakeResponse {
+ s.events.On(builtEvent(name))
+ }
+ return bakeResponse
+}
+
+func (s composeService) displayDryRunBuildEvent(name, dryRunUUID, tag string) {
+ s.events.On(api.Resource{
+ ID: name + " ==>",
+ Status: api.Done,
+ Text: fmt.Sprintf("==> writing image %s", dryRunUUID),
+ })
+ s.events.On(api.Resource{
+ ID: name + " ==> ==>",
+ Status: api.Done,
+ Text: fmt.Sprintf(`naming to %s`, tag),
+ })
+}
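For reference, the JSON piped to `buildx bake --file -` by doBuildBake looks roughly like the sketch below; bakeTarget here is a trimmed-down stand-in for the unexported type above, and the service name, context, and image tag are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

// bakeTarget mirrors a few representative fields of the bake target definition.
type bakeTarget struct {
	Context    string   `json:"context,omitempty"`
	Dockerfile string   `json:"dockerfile,omitempty"`
	Tags       []string `json:"tags,omitempty"`
	Outputs    []string `json:"output,omitempty"`
}

func main() {
	cfg := map[string]any{
		"group": map[string]any{
			"default": map[string][]string{"targets": {"web"}},
		},
		"target": map[string]bakeTarget{
			"web": {
				Context:    "./web",                        // hypothetical build context
				Dockerfile: "web/Dockerfile",               // hypothetical Dockerfile path
				Tags:       []string{"example/web:latest"}, // hypothetical image name
				Outputs:    []string{"type=docker"},        // load into the local image store
			},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}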
diff --git a/pkg/compose/build_buildkit.go b/pkg/compose/build_buildkit.go
deleted file mode 100644
index d4120ced3dd..00000000000
--- a/pkg/compose/build_buildkit.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package compose
-
-import (
- "context"
- "os"
- "path/filepath"
-
- "github.com/compose-spec/compose-go/types"
- "github.com/docker/buildx/build"
- "github.com/docker/buildx/driver"
- xprogress "github.com/docker/buildx/util/progress"
-)
-
-func (s *composeService) doBuildBuildkit(ctx context.Context, project *types.Project, opts map[string]build.Options, mode string) (map[string]string, error) {
- const drivername = "default"
- d, err := driver.GetDriver(ctx, drivername, nil, s.apiClient(), s.configFile(), nil, nil, nil, nil, nil, project.WorkingDir)
- if err != nil {
- return nil, err
- }
- driverInfo := []build.DriverInfo{
- {
- Name: drivername,
- Driver: d,
- },
- }
-
- // Progress needs its own context that lives longer than the
- // build one otherwise it won't read all the messages from
- // build and will lock
- progressCtx, cancel := context.WithCancel(context.Background())
- defer cancel()
- w := xprogress.NewPrinter(progressCtx, s.stdout(), os.Stdout, mode)
-
- // We rely on buildx "docker" builder integrated in docker engine, so don't need a DockerAPI here
- response, err := build.Build(ctx, driverInfo, opts, nil, filepath.Dir(s.configFile().Filename), w)
- errW := w.Wait()
- if err == nil {
- err = errW
- }
- if err != nil {
- return nil, WrapCategorisedComposeError(err, BuildFailure)
- }
-
- imagesBuilt := map[string]string{}
- for name, img := range response {
- if img == nil || len(img.ExporterResponse) == 0 {
- continue
- }
- digest, ok := img.ExporterResponse["containerimage.digest"]
- if !ok {
- continue
- }
- imagesBuilt[name] = digest
- }
-
- return imagesBuilt, err
-}
diff --git a/pkg/compose/build_classic.go b/pkg/compose/build_classic.go
index 6f76999cf7e..2157ffdecb5 100644
--- a/pkg/compose/build_classic.go
+++ b/pkg/compose/build_classic.go
@@ -19,91 +19,174 @@ package compose
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
- "runtime"
"strings"
- buildx "github.com/docker/buildx/build"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli"
"github.com/docker/cli/cli/command/image/build"
- dockertypes "github.com/docker/docker/api/types"
- "github.com/docker/docker/cli"
- "github.com/docker/docker/pkg/archive"
- "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/compose/v5/pkg/api"
+ buildtypes "github.com/docker/docker/api/types/build"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
- "github.com/docker/docker/pkg/urlutil"
- "github.com/hashicorp/go-multierror"
- "github.com/pkg/errors"
+ "github.com/moby/go-archive"
+ "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-func (s *composeService) doBuildClassic(ctx context.Context, opts map[string]buildx.Options) (map[string]string, error) {
- var nameDigests = make(map[string]string)
- var errs error
- for name, o := range opts {
- digest, err := s.doBuildClassicSimpleImage(ctx, o)
+func (s *composeService) doBuildClassic(ctx context.Context, project *types.Project, serviceToBuild types.Services, options api.BuildOptions) (map[string]string, error) {
+ imageIDs := map[string]string{}
+
+ // Without bake, additional_contexts entries of the form service:xx are implemented by building images in dependency order
+ project, err := project.WithServicesTransform(func(serviceName string, service types.ServiceConfig) (types.ServiceConfig, error) {
+ if service.Build != nil {
+ for _, c := range service.Build.AdditionalContexts {
+ if t, found := strings.CutPrefix(c, types.ServicePrefix); found {
+ if service.DependsOn == nil {
+ service.DependsOn = map[string]types.ServiceDependency{}
+ }
+ service.DependsOn[t] = types.ServiceDependency{
+ Condition: "build", // non-canonical, but will force dependency graph ordering
+ }
+ }
+ }
+ }
+ return service, nil
+ })
+ if err != nil {
+ return imageIDs, err
+ }
+
+ // we use a pre-allocated []string to collect built digests by service index while running concurrent goroutines
+ builtDigests := make([]string, len(project.Services))
+ names := project.ServiceNames()
+ getServiceIndex := func(name string) int {
+ for idx, n := range names {
+ if n == name {
+ return idx
+ }
+ }
+ return -1
+ }
+
+ err = InDependencyOrder(ctx, project, func(ctx context.Context, name string) error {
+ trace.SpanFromContext(ctx).SetAttributes(attribute.String("builder", "classic"))
+ service, ok := serviceToBuild[name]
+ if !ok {
+ return nil
+ }
+
+ image := api.GetImageNameOrDefault(service, project.Name)
+ s.events.On(buildingEvent(image))
+ id, err := s.doBuildImage(ctx, project, service, options)
if err != nil {
- errs = multierror.Append(errs, err).ErrorOrNil()
+ return err
}
- nameDigests[name] = digest
+ s.events.On(builtEvent(image))
+ builtDigests[getServiceIndex(name)] = id
+
+ if options.Push {
+ return s.push(ctx, project, api.PushOptions{})
+ }
+ return nil
+ }, func(traversal *graphTraversal) {
+ traversal.maxConcurrency = s.maxConcurrency
+ })
+ if err != nil {
+ return nil, err
}
- return nameDigests, errs
+ for i, imageDigest := range builtDigests {
+ if imageDigest != "" {
+ service := project.Services[names[i]]
+ imageRef := api.GetImageNameOrDefault(service, project.Name)
+ imageIDs[imageRef] = imageDigest
+ }
+ }
+ return imageIDs, err
}
-// nolint: gocyclo
-func (s *composeService) doBuildClassicSimpleImage(ctx context.Context, options buildx.Options) (string, error) {
+//nolint:gocyclo
+func (s *composeService) doBuildImage(ctx context.Context, project *types.Project, service types.ServiceConfig, options api.BuildOptions) (string, error) {
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
contextDir string
- tempDir string
relDockerfile string
-
- err error
)
- dockerfileName := options.Inputs.DockerfilePath
- specifiedContext := options.Inputs.ContextPath
+ if len(service.Build.Platforms) > 1 {
+ return "", fmt.Errorf("the classic builder doesn't support multi-arch build, set DOCKER_BUILDKIT=1 to use BuildKit")
+ }
+ if service.Build.Privileged {
+ return "", fmt.Errorf("the classic builder doesn't support privileged mode, set DOCKER_BUILDKIT=1 to use BuildKit")
+ }
+ if len(service.Build.AdditionalContexts) > 0 {
+ return "", fmt.Errorf("the classic builder doesn't support additional contexts, set DOCKER_BUILDKIT=1 to use BuildKit")
+ }
+ if len(service.Build.SSH) > 0 {
+ return "", fmt.Errorf("the classic builder doesn't support SSH keys, set DOCKER_BUILDKIT=1 to use BuildKit")
+ }
+ if len(service.Build.Secrets) > 0 {
+ return "", fmt.Errorf("the classic builder doesn't support secrets, set DOCKER_BUILDKIT=1 to use BuildKit")
+ }
+
+ if service.Build.Labels == nil {
+ service.Build.Labels = make(map[string]string)
+ }
+ service.Build.Labels[api.ImageBuilderLabel] = "classic"
+
+ dockerfileName := dockerFilePath(service.Build.Context, service.Build.Dockerfile)
+ specifiedContext := service.Build.Context
progBuff := s.stdout()
buildBuff := s.stdout()
- if options.ImageIDFile != "" {
- // Avoid leaving a stale file if we eventually fail
- if err := os.Remove(options.ImageIDFile); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrap(err, "removing image ID file")
- }
+
+ contextType, err := build.DetectContextType(specifiedContext)
+ if err != nil {
+ return "", err
}
- switch {
- case isLocalDir(specifiedContext):
+ switch contextType {
+ case build.ContextTypeStdin:
+ return "", fmt.Errorf("building from STDIN is not supported")
+ case build.ContextTypeLocal:
contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, dockerfileName)
- if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
- // Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx
+ if err != nil {
+ return "", fmt.Errorf("unable to prepare context: %w", err)
+ }
+ if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
+ // Dockerfile is outside build-context; read the Dockerfile and pass it as dockerfileCtx
dockerfileCtx, err = os.Open(dockerfileName)
if err != nil {
- return "", errors.Errorf("unable to open Dockerfile: %v", err)
+ return "", fmt.Errorf("unable to open Dockerfile: %w", err)
}
- defer dockerfileCtx.Close() // nolint:errcheck
+ defer dockerfileCtx.Close() //nolint:errcheck
}
- case urlutil.IsGitURL(specifiedContext):
+ case build.ContextTypeGit:
+ var tempDir string
tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, dockerfileName)
- case urlutil.IsURL(specifiedContext):
+ if err != nil {
+ return "", fmt.Errorf("unable to prepare context: %w", err)
+ }
+ defer func() {
+ _ = os.RemoveAll(tempDir)
+ }()
+ contextDir = tempDir
+ case build.ContextTypeRemote:
buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, dockerfileName)
+ if err != nil {
+ return "", fmt.Errorf("unable to prepare context: %w", err)
+ }
default:
- return "", errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
- }
-
- if err != nil {
- return "", errors.Errorf("unable to prepare context: %s", err)
- }
-
- if tempDir != "" {
- defer os.RemoveAll(tempDir) // nolint:errcheck
- contextDir = tempDir
+ return "", fmt.Errorf("unable to prepare context: path %q not found", specifiedContext)
}
// read from a directory into tar archive
@@ -114,16 +197,16 @@ func (s *composeService) doBuildClassicSimpleImage(ctx context.Context, options
}
if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
- return "", errors.Wrap(err, "checking context")
+ return "", fmt.Errorf("checking context: %w", err)
}
// And canonicalize dockerfile name to a platform-independent one
- relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
+ relDockerfile = filepath.ToSlash(relDockerfile)
excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, false)
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
ExcludePatterns: excludes,
- ChownOpts: &idtools.Identity{},
+ ChownOpts: &archive.ChownOpts{UID: 0, GID: 0},
})
if err != nil {
return "", err
@@ -143,45 +226,49 @@ func (s *composeService) doBuildClassicSimpleImage(ctx context.Context, options
return "", err
}
- // if up to this point nothing has set the context then we must have another
- // way for sending it(streaming) and set the context to the Dockerfile
- if dockerfileCtx != nil && buildCtx == nil {
- buildCtx = dockerfileCtx
- }
-
+ // Set up an upload progress bar
progressOutput := streamformatter.NewProgressOutput(progBuff)
- var body io.Reader
- if buildCtx != nil {
- body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
- }
+ body := progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
configFile := s.configFile()
creds, err := configFile.GetAllCredentials()
if err != nil {
return "", err
}
- authConfigs := make(map[string]dockertypes.AuthConfig, len(creds))
- for k, auth := range creds {
- authConfigs[k] = dockertypes.AuthConfig(auth)
+ authConfigs := make(map[string]registry.AuthConfig, len(creds))
+ for k, authConfig := range creds {
+ authConfigs[k] = registry.AuthConfig{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ ServerAddress: authConfig.ServerAddress,
+
+ // TODO(thaJeztah): Are these expected to be included? See https://github.com/docker/cli/pull/6516#discussion_r2387586472
+ Auth: authConfig.Auth,
+ IdentityToken: authConfig.IdentityToken,
+ RegistryToken: authConfig.RegistryToken,
+ }
}
- buildOptions := imageBuildOptions(options)
- buildOptions.Version = dockertypes.BuilderV1
- buildOptions.Dockerfile = relDockerfile
- buildOptions.AuthConfigs = authConfigs
+ buildOpts := imageBuildOptions(s.getProxyConfig(), project, service, options)
+ imageName := api.GetImageNameOrDefault(service, project.Name)
+ buildOpts.Tags = append(buildOpts.Tags, imageName)
+ buildOpts.Dockerfile = relDockerfile
+ buildOpts.AuthConfigs = authConfigs
+ buildOpts.Memory = options.Memory
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- response, err := s.apiClient().ImageBuild(ctx, body, buildOptions)
+ s.events.On(buildingEvent(imageName))
+ response, err := s.apiClient().ImageBuild(ctx, body, buildOpts)
if err != nil {
return "", err
}
- defer response.Body.Close() // nolint:errcheck
+ defer response.Body.Close() //nolint:errcheck
imageID := ""
aux := func(msg jsonmessage.JSONMessage) {
- var result dockertypes.BuildResult
+ var result buildtypes.Result
if err := json.Unmarshal(*msg.Aux, &result); err != nil {
- fmt.Fprintf(s.stderr(), "Failed to parse aux message: %s", err)
+ logrus.Errorf("Failed to parse aux message: %s", err)
} else {
imageID = result.ID
}
@@ -189,7 +276,8 @@ func (s *composeService) doBuildClassicSimpleImage(ctx context.Context, options
err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, progBuff.FD(), true, aux)
if err != nil {
- if jerr, ok := err.(*jsonmessage.JSONError); ok {
+ var jerr *jsonmessage.JSONError
+ if errors.As(err, &jerr) {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
@@ -198,54 +286,23 @@ func (s *composeService) doBuildClassicSimpleImage(ctx context.Context, options
}
return "", err
}
-
- // Windows: show error message about modified file permissions if the
- // daemon isn't running Windows.
- if response.OSType != "windows" && runtime.GOOS == "windows" {
- // if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
- fmt.Fprintln(s.stdout(), "SECURITY WARNING: You are building a Docker "+
- "image from Windows against a non-Windows Docker host. All files and "+
- "directories added to build context will have '-rwxr-xr-x' permissions. "+
- "It is recommended to double check and reset permissions for sensitive "+
- "files and directories.")
- }
-
- if options.ImageIDFile != "" {
- if imageID == "" {
- return "", errors.Errorf("Server did not provide an image ID. Cannot write %s", options.ImageIDFile)
- }
- if err := ioutil.WriteFile(options.ImageIDFile, []byte(imageID), 0666); err != nil {
- return "", err
- }
- }
-
+ s.events.On(builtEvent(imageName))
return imageID, nil
}
-func isLocalDir(c string) bool {
- _, err := os.Stat(c)
- return err == nil
-}
-
-func imageBuildOptions(options buildx.Options) dockertypes.ImageBuildOptions {
- return dockertypes.ImageBuildOptions{
- Tags: options.Tags,
- NoCache: options.NoCache,
+func imageBuildOptions(proxyConfigs map[string]string, project *types.Project, service types.ServiceConfig, options api.BuildOptions) buildtypes.ImageBuildOptions {
+ config := service.Build
+ return buildtypes.ImageBuildOptions{
+ Version: buildtypes.BuilderV1,
+ Tags: config.Tags,
+ NoCache: config.NoCache,
Remove: true,
- PullParent: options.Pull,
- BuildArgs: toMapStringStringPtr(options.BuildArgs),
- Labels: options.Labels,
- NetworkMode: options.NetworkMode,
- ExtraHosts: options.ExtraHosts,
- Target: options.Target,
- }
-}
-
-func toMapStringStringPtr(source map[string]string) map[string]*string {
- dest := make(map[string]*string)
- for k, v := range source {
- v := v
- dest[k] = &v
+ PullParent: config.Pull,
+ BuildArgs: resolveAndMergeBuildArgs(proxyConfigs, project, service, options),
+ Labels: config.Labels,
+ NetworkMode: config.Network,
+ ExtraHosts: config.ExtraHosts.AsList(":"),
+ Target: config.Target,
+ Isolation: container.Isolation(config.Isolation),
}
- return dest
}
diff --git a/pkg/compose/build_test.go b/pkg/compose/build_test.go
new file mode 100644
index 00000000000..fa0a9e2c4e5
--- /dev/null
+++ b/pkg/compose/build_test.go
@@ -0,0 +1,57 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "slices"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "gotest.tools/v3/assert"
+)
+
+func Test_addBuildDependencies(t *testing.T) {
+ project := &types.Project{Services: types.Services{
+ "test": types.ServiceConfig{
+ Build: &types.BuildConfig{
+ AdditionalContexts: map[string]string{
+ "foo": "service:foo",
+ "bar": "service:bar",
+ },
+ },
+ },
+ "foo": types.ServiceConfig{
+ Build: &types.BuildConfig{
+ AdditionalContexts: map[string]string{
+ "zot": "service:zot",
+ },
+ },
+ },
+ "bar": types.ServiceConfig{
+ Build: &types.BuildConfig{},
+ },
+ "zot": types.ServiceConfig{
+ Build: &types.BuildConfig{},
+ },
+ }}
+
+ services := addBuildDependencies([]string{"test"}, project)
+ expected := []string{"test", "foo", "bar", "zot"}
+ slices.Sort(services)
+ slices.Sort(expected)
+ assert.DeepEqual(t, services, expected)
+}
diff --git a/pkg/compose/commit.go b/pkg/compose/commit.go
new file mode 100644
index 00000000000..f50e8fc22e2
--- /dev/null
+++ b/pkg/compose/commit.go
@@ -0,0 +1,78 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+)
+
+func (s *composeService) Commit(ctx context.Context, projectName string, options api.CommitOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.commit(ctx, projectName, options)
+ }, "commit", s.events)
+}
+
+func (s *composeService) commit(ctx context.Context, projectName string, options api.CommitOptions) error {
+ projectName = strings.ToLower(projectName)
+
+ ctr, err := s.getSpecifiedContainer(ctx, projectName, oneOffInclude, false, options.Service, options.Index)
+ if err != nil {
+ return err
+ }
+
+ name := getCanonicalContainerName(ctr)
+
+ s.events.On(api.Resource{
+ ID: name,
+ Status: api.Working,
+ Text: api.StatusCommitting,
+ })
+
+ if s.dryRun {
+ s.events.On(api.Resource{
+ ID: name,
+ Status: api.Done,
+ Text: api.StatusCommitted,
+ })
+
+ return nil
+ }
+
+ response, err := s.apiClient().ContainerCommit(ctx, ctr.ID, container.CommitOptions{
+ Reference: options.Reference,
+ Comment: options.Comment,
+ Author: options.Author,
+ Changes: options.Changes.GetSlice(),
+ Pause: options.Pause,
+ })
+ if err != nil {
+ return err
+ }
+
+ s.events.On(api.Resource{
+ ID: name,
+ Text: fmt.Sprintf("Committed as %s", response.ID),
+ Status: api.Done,
+ })
+
+ return nil
+}
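As a usage sketch for the new Commit entry point (illustrative only: the project name, service name, and image reference are invented, dockerCli/ctx setup is omitted, and it assumes Commit is exposed on the api.Compose value returned by NewComposeService; the option fields are the ones used above):

	// Hypothetical caller: commit the first container of the "web" service in
	// project "myproject" to a new image reference, pausing it during the commit.
	svc, err := NewComposeService(dockerCli)
	if err != nil {
		// handle construction error
	}
	err = svc.Commit(ctx, "myproject", api.CommitOptions{
		Service:   "web",
		Index:     1,
		Reference: "myproject/web:snapshot",
		Pause:     true,
	})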
diff --git a/pkg/compose/compose.go b/pkg/compose/compose.go
index 5f7446e084c..e303abba432 100644
--- a/pkg/compose/compose.go
+++ b/pkg/compose/compose.go
@@ -17,37 +17,219 @@
package compose
import (
- "bytes"
"context"
- "encoding/json"
+ "errors"
"fmt"
"io"
+ "strconv"
"strings"
+ "sync"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config/configfile"
+ "github.com/docker/cli/cli/flags"
"github.com/docker/cli/cli/streams"
- "github.com/docker/compose/v2/pkg/api"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/compose/v5/pkg/dryrun"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
- "github.com/pkg/errors"
- "github.com/sanathkr/go-yaml"
+ "github.com/jonboulle/clockwork"
+ "github.com/sirupsen/logrus"
+
+ "github.com/docker/compose/v5/pkg/api"
)
-// Separator is used for naming components
-var Separator = "-"
+type Option func(service *composeService) error
+
+// NewComposeService creates a Compose service using Docker CLI.
+// This is the standard constructor that requires command.Cli for full functionality.
+//
+// Example usage:
+//
+// dockerCli, _ := command.NewDockerCli()
+// service := NewComposeService(dockerCli)
+//
+// For advanced configuration with custom overrides, use Option functions:
+//
+// service := NewComposeService(dockerCli,
+// WithPrompt(prompt.NewPrompt(cli.In(), cli.Out()).Confirm),
+// WithOutputStream(customOut),
+// WithErrorStream(customErr),
+// WithInputStream(customIn))
+//
+// Or set all streams at once:
+//
+// service := NewComposeService(dockerCli,
+// WithStreams(customOut, customErr, customIn))
+func NewComposeService(dockerCli command.Cli, options ...Option) (api.Compose, error) {
+ s := &composeService{
+ dockerCli: dockerCli,
+ clock: clockwork.NewRealClock(),
+ maxConcurrency: -1,
+ dryRun: false,
+ }
+ for _, option := range options {
+ if err := option(s); err != nil {
+ return nil, err
+ }
+ }
+ if s.prompt == nil {
+ s.prompt = func(message string, defaultValue bool) (bool, error) {
+ fmt.Println(message)
+ logrus.Warning("Compose is running without a 'prompt' component to interact with user")
+ return defaultValue, nil
+ }
+ }
+ if s.events == nil {
+ s.events = &ignore{}
+ }
+
+ // If custom streams were provided, wrap the Docker CLI to use them
+ if s.outStream != nil || s.errStream != nil || s.inStream != nil {
+ s.dockerCli = s.wrapDockerCliWithStreams(dockerCli)
+ }
+
+ return s, nil
+}
+
+// WithStreams sets custom I/O streams for output and interaction
+func WithStreams(out, err io.Writer, in io.Reader) Option {
+ return func(s *composeService) error {
+ s.outStream = out
+ s.errStream = err
+ s.inStream = in
+ return nil
+ }
+}
+
+// WithOutputStream sets a custom output stream
+func WithOutputStream(out io.Writer) Option {
+ return func(s *composeService) error {
+ s.outStream = out
+ return nil
+ }
+}
+
+// WithErrorStream sets a custom error stream
+func WithErrorStream(err io.Writer) Option {
+ return func(s *composeService) error {
+ s.errStream = err
+ return nil
+ }
+}
+
+// WithInputStream sets a custom input stream
+func WithInputStream(in io.Reader) Option {
+ return func(s *composeService) error {
+ s.inStream = in
+ return nil
+ }
+}
+
+// WithContextInfo sets custom Docker context information
+func WithContextInfo(info api.ContextInfo) Option {
+ return func(s *composeService) error {
+ s.contextInfo = info
+ return nil
+ }
+}
+
+// WithProxyConfig sets custom HTTP proxy configuration for builds
+func WithProxyConfig(config map[string]string) Option {
+ return func(s *composeService) error {
+ s.proxyConfig = config
+ return nil
+ }
+}
+
+// WithPrompt configures a UI component for the Compose service to interact with the user and confirm actions
+func WithPrompt(prompt Prompt) Option {
+ return func(s *composeService) error {
+ s.prompt = prompt
+ return nil
+ }
+}
+
+// WithMaxConcurrency defines the upper limit for concurrent operations against the Docker Engine API
+func WithMaxConcurrency(maxConcurrency int) Option {
+ return func(s *composeService) error {
+ s.maxConcurrency = maxConcurrency
+ return nil
+ }
+}
+
+// WithDryRun configures Compose to run without actually applying changes
+func WithDryRun(s *composeService) error {
+ s.dryRun = true
+ cli, err := command.NewDockerCli()
+ if err != nil {
+ return err
+ }
+
+ options := flags.NewClientOptions()
+ options.Context = s.dockerCli.CurrentContext()
+ err = cli.Initialize(options, command.WithInitializeClient(func(cli *command.DockerCli) (client.APIClient, error) {
+ return dryrun.NewDryRunClient(s.apiClient(), s.dockerCli)
+ }))
+ if err != nil {
+ return err
+ }
+ s.dockerCli = cli
+ return nil
+}
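Note that, unlike the helpers above which return an Option, WithDryRun already matches the Option signature and is therefore passed by name rather than called. A minimal construction sketch under that observation (dockerCli setup omitted; the concurrency value and proxy entries are illustrative):

	// Hypothetical: simulate changes only, cap parallel Engine API calls,
	// and inject explicit proxy settings for builds.
	svc, err := NewComposeService(dockerCli,
		WithDryRun, // already an Option, so no parentheses
		WithMaxConcurrency(4),
		WithProxyConfig(map[string]string{
			"HTTP_PROXY": "http://proxy.internal:3128",
			"NO_PROXY":   "localhost,127.0.0.1",
		}),
	)
	if err != nil {
		// handle construction error
	}
	_ = svc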
+
+type Prompt func(message string, defaultValue bool) (bool, error)
-// NewComposeService create a local implementation of the compose.Service API
-func NewComposeService(dockerCli command.Cli) api.Service {
- return &composeService{
- dockerCli: dockerCli,
+// AlwaysOkPrompt returns a Prompt implementation that always returns true without user interaction.
+func AlwaysOkPrompt() Prompt {
+ return func(message string, defaultValue bool) (bool, error) {
+ return true, nil
+ }
+}
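Because Prompt is a plain function type, callers can plug in any compatible implementation; a minimal sketch of a hypothetical counterpart to AlwaysOkPrompt that declines every confirmation (not part of this change):

	// alwaysNoPrompt refuses every confirmation without interacting with the user.
	func alwaysNoPrompt() Prompt {
		return func(message string, defaultValue bool) (bool, error) {
			return false, nil
		}
	}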
+
+// WithEventProcessor configures a component to be notified of Compose operation and progress events.
+// Typically used to configure a progress UI.
+func WithEventProcessor(bus api.EventProcessor) Option {
+ return func(s *composeService) error {
+ s.events = bus
+ return nil
}
}
type composeService struct {
dockerCli command.Cli
+ // prompt is used to interact with user and confirm actions
+ prompt Prompt
+ // events collects task execution events
+ events api.EventProcessor
+
+ // Optional overrides for specific components (for SDK users)
+ outStream io.Writer
+ errStream io.Writer
+ inStream io.Reader
+ contextInfo api.ContextInfo
+ proxyConfig map[string]string
+
+ clock clockwork.Clock
+ maxConcurrency int
+ dryRun bool
+}
+
+// Close releases any connections/resources held by the underlying clients.
+//
+// In practice, this service has the same lifetime as the process, so everything
+// gets cleaned up at roughly the same time even if Close is never invoked.
+func (s *composeService) Close() error {
+ var errs []error
+ if s.dockerCli != nil {
+ errs = append(errs, s.apiClient().Close())
+ }
+ return errors.Join(errs...)
}
func (s *composeService) apiClient() client.APIClient {
@@ -58,6 +240,22 @@ func (s *composeService) configFile() *configfile.ConfigFile {
return s.dockerCli.ConfigFile()
}
+// getContextInfo returns the context info - either custom override or dockerCli adapter
+func (s *composeService) getContextInfo() api.ContextInfo {
+ if s.contextInfo != nil {
+ return s.contextInfo
+ }
+ return &dockerCliContextInfo{cli: s.dockerCli}
+}
+
+// getProxyConfig returns the proxy config - either custom override or environment-based
+func (s *composeService) getProxyConfig() map[string]string {
+ if s.proxyConfig != nil {
+ return s.proxyConfig
+ }
+ return storeutil.GetProxyConfig(s.dockerCli)
+}
+
func (s *composeService) stdout() *streams.Out {
return s.dockerCli.Out()
}
@@ -66,11 +264,73 @@ func (s *composeService) stdin() *streams.In {
return s.dockerCli.In()
}
-func (s *composeService) stderr() io.Writer {
+func (s *composeService) stderr() *streams.Out {
return s.dockerCli.Err()
}
-func getCanonicalContainerName(c moby.Container) string {
+// readCloserAdapter adapts io.Reader to io.ReadCloser
+type readCloserAdapter struct {
+ r io.Reader
+}
+
+func (r *readCloserAdapter) Read(p []byte) (int, error) {
+ return r.r.Read(p)
+}
+
+func (r *readCloserAdapter) Close() error {
+ return nil
+}
+
+// wrapDockerCliWithStreams wraps the Docker CLI to intercept and override stream methods
+func (s *composeService) wrapDockerCliWithStreams(baseCli command.Cli) command.Cli {
+ wrapper := &streamOverrideWrapper{
+ Cli: baseCli,
+ }
+
+ // Wrap custom streams in Docker CLI's stream types
+ if s.outStream != nil {
+ wrapper.outStream = streams.NewOut(s.outStream)
+ }
+ if s.errStream != nil {
+ wrapper.errStream = streams.NewOut(s.errStream)
+ }
+ if s.inStream != nil {
+ wrapper.inStream = streams.NewIn(&readCloserAdapter{r: s.inStream})
+ }
+
+ return wrapper
+}
+
+// streamOverrideWrapper wraps command.Cli to override streams with custom implementations
+type streamOverrideWrapper struct {
+ command.Cli
+ outStream *streams.Out
+ errStream *streams.Out
+ inStream *streams.In
+}
+
+func (w *streamOverrideWrapper) Out() *streams.Out {
+ if w.outStream != nil {
+ return w.outStream
+ }
+ return w.Cli.Out()
+}
+
+func (w *streamOverrideWrapper) Err() *streams.Out {
+ if w.errStream != nil {
+ return w.errStream
+ }
+ return w.Cli.Err()
+}
+
+func (w *streamOverrideWrapper) In() *streams.In {
+ if w.inStream != nil {
+ return w.inStream
+ }
+ return w.Cli.In()
+}
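A sketch of using the stream overrides to capture Compose output in memory, for example when embedding Compose as a library (assumes the bytes package is imported; dockerCli setup omitted):

	// Hypothetical: collect everything written to the wrapped CLI streams.
	var stdout, stderr bytes.Buffer
	svc, err := NewComposeService(dockerCli,
		WithOutputStream(&stdout),
		WithErrorStream(&stderr),
	)
	if err != nil {
		// handle construction error
	}
	_ = svc // run operations, then inspect stdout.String() / stderr.String()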
+
+func getCanonicalContainerName(c container.Summary) string {
if len(c.Names) == 0 {
// corner case, sometimes happens on removal; return the short ID as a safeguard value
return c.ID[:12]
@@ -81,83 +341,73 @@ func getCanonicalContainerName(c moby.Container) string {
return name[1:]
}
}
- return c.Names[0][1:]
-}
-func getContainerNameWithoutProject(c moby.Container) string {
- name := getCanonicalContainerName(c)
- project := c.Labels[api.ProjectLabel]
- prefix := fmt.Sprintf("%s_%s_", project, c.Labels[api.ServiceLabel])
- if strings.HasPrefix(name, prefix) {
- return name[len(project)+1:]
- }
- return name
+ return strings.TrimPrefix(c.Names[0], "/")
}
-func (s *composeService) Convert(ctx context.Context, project *types.Project, options api.ConvertOptions) ([]byte, error) {
- switch options.Format {
- case "json":
- marshal, err := json.MarshalIndent(project, "", " ")
- if err != nil {
- return nil, err
- }
- return escapeDollarSign(marshal), nil
- case "yaml":
- marshal, err := yaml.Marshal(project)
- if err != nil {
- return nil, err
- }
- return escapeDollarSign(marshal), nil
- default:
- return nil, fmt.Errorf("unsupported format %q", options)
+func getContainerNameWithoutProject(c container.Summary) string {
+ project := c.Labels[api.ProjectLabel]
+ defaultName := getDefaultContainerName(project, c.Labels[api.ServiceLabel], c.Labels[api.ContainerNumberLabel])
+ name := getCanonicalContainerName(c)
+ if name != defaultName {
+ // service declares a custom container_name
+ return name
}
-}
-
-func escapeDollarSign(marshal []byte) []byte {
- dollar := []byte{'$'}
- escDollar := []byte{'$', '$'}
- return bytes.ReplaceAll(marshal, dollar, escDollar)
+ return name[len(project)+1:]
}
// projectFromName builds a types.Project based on actual resources with compose labels set
func (s *composeService) projectFromName(containers Containers, projectName string, services ...string) (*types.Project, error) {
project := &types.Project{
- Name: projectName,
+ Name: projectName,
+ Services: types.Services{},
}
if len(containers) == 0 {
- return project, errors.Wrap(api.ErrNotFound, fmt.Sprintf("no container found for project %q", projectName))
+ return project, fmt.Errorf("no container found for project %q: %w", projectName, api.ErrNotFound)
}
- set := map[string]*types.ServiceConfig{}
+ set := types.Services{}
for _, c := range containers {
- serviceLabel := c.Labels[api.ServiceLabel]
- _, ok := set[serviceLabel]
+ serviceLabel, ok := c.Labels[api.ServiceLabel]
+ if !ok {
+ serviceLabel = getCanonicalContainerName(c)
+ }
+ service, ok := set[serviceLabel]
if !ok {
- set[serviceLabel] = &types.ServiceConfig{
+ service = types.ServiceConfig{
Name: serviceLabel,
Image: c.Image,
Labels: c.Labels,
}
}
- set[serviceLabel].Scale++
+ service.Scale = increment(service.Scale)
+ set[serviceLabel] = service
}
- for _, service := range set {
+ for name, service := range set {
dependencies := service.Labels[api.DependenciesLabel]
- if len(dependencies) > 0 {
+ if dependencies != "" {
service.DependsOn = types.DependsOnConfig{}
for _, dc := range strings.Split(dependencies, ",") {
dcArr := strings.Split(dc, ":")
condition := ServiceConditionRunningOrHealthy
+ // Let's restart the dependency by default if we don't have the info stored in the label
+ restart := true
+ required := true
dependency := dcArr[0]
// backward compatibility
if len(dcArr) > 1 {
condition = dcArr[1]
+ if len(dcArr) > 2 {
+ restart, _ = strconv.ParseBool(dcArr[2])
+ }
}
- service.DependsOn[dependency] = types.ServiceDependency{Condition: condition}
+ service.DependsOn[dependency] = types.ServiceDependency{Condition: condition, Restart: restart, Required: required}
}
+ set[name] = service
}
- project.Services = append(project.Services, *service)
}
+ project.Services = set
+
SERVICES:
for _, qs := range services {
for _, es := range project.Services {
@@ -165,9 +415,9 @@ SERVICES:
continue SERVICES
}
}
- return project, errors.Wrapf(api.ErrNotFound, "no such service: %q", qs)
+ return project, fmt.Errorf("no such service: %q: %w", qs, api.ErrNotFound)
}
- err := project.ForServices(services)
+ project, err := project.WithSelectedServices(services)
if err != nil {
return project, err
}
@@ -175,28 +425,19 @@ SERVICES:
return project, nil
}
-// actualState list resources labelled by projectName to rebuild compose project model
-func (s *composeService) actualState(ctx context.Context, projectName string, services []string) (Containers, *types.Project, error) {
- var containers Containers
- // don't filter containers by options.Services so projectFromName can rebuild project with all existing resources
- containers, err := s.getContainers(ctx, projectName, oneOffInclude, true)
- if err != nil {
- return nil, nil, err
- }
-
- project, err := s.projectFromName(containers, projectName, services...)
- if err != nil && !api.IsNotFoundError(err) {
- return nil, nil, err
+func increment(scale *int) *int {
+ i := 1
+ if scale != nil {
+ i = *scale + 1
}
-
- if len(services) > 0 {
- containers = containers.filter(isService(services...))
- }
- return containers, project, nil
+ return &i
}
func (s *composeService) actualVolumes(ctx context.Context, projectName string) (types.Volumes, error) {
- volumes, err := s.apiClient().VolumeList(ctx, filters.NewArgs(projectFilter(projectName)))
+ opts := volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(projectName)),
+ }
+ volumes, err := s.apiClient().VolumeList(ctx, opts)
if err != nil {
return nil, err
}
@@ -213,7 +454,7 @@ func (s *composeService) actualVolumes(ctx context.Context, projectName string)
}
func (s *composeService) actualNetworks(ctx context.Context, projectName string) (types.Networks, error) {
- networks, err := s.apiClient().NetworkList(ctx, moby.NetworkListOptions{
+ networks, err := s.apiClient().NetworkList(ctx, network.ListOptions{
Filters: filters.NewArgs(projectFilter(projectName)),
})
if err != nil {
@@ -230,3 +471,44 @@ func (s *composeService) actualNetworks(ctx context.Context, projectName string)
}
return actual, nil
}
+
+var swarmEnabled = struct {
+ once sync.Once
+ val bool
+ err error
+}{}
+
+func (s *composeService) isSWarmEnabled(ctx context.Context) (bool, error) {
+ swarmEnabled.once.Do(func() {
+ info, err := s.apiClient().Info(ctx)
+ if err != nil {
+ swarmEnabled.err = err
+ }
+ switch info.Swarm.LocalNodeState {
+ case swarm.LocalNodeStateInactive, swarm.LocalNodeStateLocked:
+ swarmEnabled.val = false
+ default:
+ swarmEnabled.val = true
+ }
+ })
+ return swarmEnabled.val, swarmEnabled.err
+}
+
+type runtimeVersionCache struct {
+ once sync.Once
+ val string
+ err error
+}
+
+var runtimeVersion runtimeVersionCache
+
+func (s *composeService) RuntimeVersion(ctx context.Context) (string, error) {
+ runtimeVersion.once.Do(func() {
+ version, err := s.apiClient().ServerVersion(ctx)
+ if err != nil {
+ runtimeVersion.err = err
+ }
+ runtimeVersion.val = version.APIVersion
+ })
+ return runtimeVersion.val, runtimeVersion.err
+}
diff --git a/pkg/compose/container.go b/pkg/compose/container.go
index 954aed7271a..502547ddc9b 100644
--- a/pkg/compose/container.go
+++ b/pkg/compose/container.go
@@ -22,23 +22,6 @@ import (
moby "github.com/docker/docker/api/types"
)
-const (
- // ContainerCreated created status
- ContainerCreated = "created"
- // ContainerRestarting restarting status
- ContainerRestarting = "restarting"
- // ContainerRunning running status
- ContainerRunning = "running"
- // ContainerRemoving removing status
- ContainerRemoving = "removing" //nolint
- // ContainerPaused paused status
- ContainerPaused = "paused" //nolint
- // ContainerExited exited status
- ContainerExited = "exited" //nolint
- // ContainerDead dead status
- ContainerDead = "dead" //nolint
-)
-
var _ io.ReadCloser = ContainerStdout{}
// ContainerStdout implement ReadCloser for moby.HijackedResponse
diff --git a/pkg/compose/containers.go b/pkg/compose/containers.go
index ca2d0ca1667..fddf8057d61 100644
--- a/pkg/compose/containers.go
+++ b/pkg/compose/containers.go
@@ -18,18 +18,19 @@ package compose
import (
"context"
+ "fmt"
+ "slices"
"sort"
"strconv"
- moby "github.com/docker/docker/api/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
-
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
)
// Containers is a set of moby Container
-type Containers []moby.Container
+type Containers []container.Summary
type oneOff int
@@ -39,12 +40,28 @@ const (
oneOffOnly
)
-func (s *composeService) getContainers(ctx context.Context, project string, oneOff oneOff, stopped bool, selectedServices ...string) (Containers, error) {
+func (s *composeService) getContainers(ctx context.Context, project string, oneOff oneOff, all bool, selectedServices ...string) (Containers, error) {
var containers Containers
- f := []filters.KeyValuePair{projectFilter(project)}
+ f := getDefaultFilters(project, oneOff, selectedServices...)
+ containers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filters.NewArgs(f...),
+ All: all,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(selectedServices) > 1 {
+ containers = containers.filter(isService(selectedServices...))
+ }
+ return containers, nil
+}
+
+func getDefaultFilters(projectName string, oneOff oneOff, selectedServices ...string) []filters.KeyValuePair {
+ f := []filters.KeyValuePair{projectFilter(projectName)}
if len(selectedServices) == 1 {
f = append(f, serviceFilter(selectedServices[0]))
}
+ f = append(f, hasConfigHashLabel())
switch oneOff {
case oneOffOnly:
f = append(f, oneOffFilter(true))
@@ -52,54 +69,90 @@ func (s *composeService) getContainers(ctx context.Context, project string, oneO
f = append(f, oneOffFilter(false))
case oneOffInclude:
}
- containers, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(f...),
- All: stopped,
+ return f
+}
+
+func (s *composeService) getSpecifiedContainer(ctx context.Context, projectName string, oneOff oneOff, all bool, serviceName string, containerIndex int) (container.Summary, error) {
+ defaultFilters := getDefaultFilters(projectName, oneOff, serviceName)
+ if containerIndex > 0 {
+ defaultFilters = append(defaultFilters, containerNumberFilter(containerIndex))
+ }
+ containers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filters.NewArgs(
+ defaultFilters...,
+ ),
+ All: all,
})
if err != nil {
- return nil, err
+ return container.Summary{}, err
}
- if len(selectedServices) > 1 {
- containers = containers.filter(isService(selectedServices...))
+ if len(containers) < 1 {
+ if containerIndex > 0 {
+ return container.Summary{}, fmt.Errorf("service %q is not running container #%d", serviceName, containerIndex)
+ }
+ return container.Summary{}, fmt.Errorf("service %q is not running", serviceName)
}
- return containers, nil
+
+ // Sort by container number first, then put one-off containers at the end
+ sort.Slice(containers, func(i, j int) bool {
+ numberLabelX, _ := strconv.Atoi(containers[i].Labels[api.ContainerNumberLabel])
+ numberLabelY, _ := strconv.Atoi(containers[j].Labels[api.ContainerNumberLabel])
+ IsOneOffLabelTrueX := containers[i].Labels[api.OneoffLabel] == "True"
+ IsOneOffLabelTrueY := containers[j].Labels[api.OneoffLabel] == "True"
+
+ if IsOneOffLabelTrueX || IsOneOffLabelTrueY {
+ return !IsOneOffLabelTrueX && IsOneOffLabelTrueY
+ }
+
+ return numberLabelX < numberLabelY
+ })
+ return containers[0], nil
}
// containerPredicate define a predicate we want container to satisfy for filtering operations
-type containerPredicate func(c moby.Container) bool
+type containerPredicate func(c container.Summary) bool
+
+func matches(c container.Summary, predicates ...containerPredicate) bool {
+ for _, predicate := range predicates {
+ if !predicate(c) {
+ return false
+ }
+ }
+ return true
+}
func isService(services ...string) containerPredicate {
- return func(c moby.Container) bool {
+ return func(c container.Summary) bool {
service := c.Labels[api.ServiceLabel]
- return utils.StringContains(services, service)
+ return slices.Contains(services, service)
}
}
-func isNotService(services ...string) containerPredicate {
- return func(c moby.Container) bool {
+// isOrphaned is a predicate to select containers without a matching service definition in compose project
+func isOrphaned(project *types.Project) containerPredicate {
+ services := append(project.ServiceNames(), project.DisabledServiceNames()...)
+ return func(c container.Summary) bool {
+ // One-off container
+ v, ok := c.Labels[api.OneoffLabel]
+ if ok && v == "True" {
+ return c.State == container.StateExited || c.State == container.StateDead
+ }
+ // Service that is not defined in the compose model
service := c.Labels[api.ServiceLabel]
- return !utils.StringContains(services, service)
+ return !slices.Contains(services, service)
}
}
-func isNotOneOff(c moby.Container) bool {
+func isNotOneOff(c container.Summary) bool {
v, ok := c.Labels[api.OneoffLabel]
return !ok || v == "False"
}
-func indexed(index int) containerPredicate {
- return func(c moby.Container) bool {
- number := c.Labels[api.ContainerNumberLabel]
- idx, err := strconv.Atoi(number)
- return err == nil && index == idx
- }
-}
-
// filter returns the Containers matching all the given predicates
-func (containers Containers) filter(predicate containerPredicate) Containers {
+func (containers Containers) filter(predicates ...containerPredicate) Containers {
var filtered Containers
for _, c := range containers {
- if predicate(c) {
+ if matches(c, predicates...) {
filtered = append(filtered, c)
}
}
@@ -114,7 +167,7 @@ func (containers Containers) names() []string {
return names
}
-func (containers Containers) forEach(fn func(moby.Container)) {
+func (containers Containers) forEach(fn func(container.Summary)) {
for _, c := range containers {
fn(c)
}
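The variadic filter above combines predicates with a logical AND, as this change already does elsewhere (e.g. containers.filter(isService(dep), isNotOneOff) in waitDependencies); a small illustrative helper under that assumption:

	// serviceContainers is a hypothetical helper: keep only containers that belong
	// to the given services and were not started as one-off runs.
	func serviceContainers(all Containers, services ...string) Containers {
		return all.filter(isService(services...), isNotOneOff)
	}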
diff --git a/pkg/compose/convergence.go b/pkg/compose/convergence.go
index 39b531624f2..701d0058212 100644
--- a/pkg/compose/convergence.go
+++ b/pkg/compose/convergence.go
@@ -18,33 +18,36 @@ package compose
import (
"context"
+ "errors"
"fmt"
+ "maps"
+ "slices"
+ "sort"
"strconv"
"strings"
"sync"
"time"
- "github.com/compose-spec/compose-go/types"
- "github.com/containerd/containerd/platforms"
- moby "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/network"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/platforms"
+ "github.com/docker/docker/api/types/container"
+ mmount "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/versions"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
const (
- extLifecycle = "x-lifecycle"
- forceRecreate = "force_recreate"
-
doubledContainerNameWarning = "WARNING: The %q service is using the custom container name %q. " +
"Docker requires each container to have a unique name. " +
- "Remove the custom name to scale the service.\n"
+ "Remove the custom name to scale the service"
)
// convergence manages service's container lifecycle.
@@ -53,24 +56,26 @@ const (
// Cross services dependencies are managed by creating services in expected order and updating `service:xx` reference
// when a service has converged, so dependent ones can be managed with resolved containers references.
type convergence struct {
- service *composeService
- observedState map[string]Containers
- stateMutex sync.Mutex
+ compose *composeService
+ services map[string]Containers
+ networks map[string]string
+ volumes map[string]string
+ stateMutex sync.Mutex
}
func (c *convergence) getObservedState(serviceName string) Containers {
c.stateMutex.Lock()
defer c.stateMutex.Unlock()
- return c.observedState[serviceName]
+ return c.services[serviceName]
}
func (c *convergence) setObservedState(serviceName string, containers Containers) {
c.stateMutex.Lock()
defer c.stateMutex.Unlock()
- c.observedState[serviceName] = containers
+ c.services[serviceName] = containers
}
-func newConvergence(services []string, state Containers, s *composeService) *convergence {
+func newConvergence(services []string, state Containers, networks map[string]string, volumes map[string]string, s *composeService) *convergence {
observedState := map[string]Containers{}
for _, s := range services {
observedState[s] = Containers{}
@@ -80,8 +85,10 @@ func newConvergence(services []string, state Containers, s *composeService) *con
observedState[service] = append(observedState[service], c)
}
return &convergence{
- service: s,
- observedState: observedState,
+ compose: s,
+ services: observedState,
+ networks: networks,
+ volumes: volumes,
}
}
@@ -92,79 +99,20 @@ func (c *convergence) apply(ctx context.Context, project *types.Project, options
return err
}
- strategy := options.RecreateDependencies
- if utils.StringContains(options.Services, name) {
- strategy = options.Recreate
- }
- err = c.ensureService(ctx, project, service, strategy, options.Inherit, options.Timeout)
- if err != nil {
- return err
- }
-
- c.updateProject(project, name)
- return nil
+ return tracing.SpanWrapFunc("service/apply", tracing.ServiceOptions(service), func(ctx context.Context) error {
+ strategy := options.RecreateDependencies
+ if slices.Contains(options.Services, name) {
+ strategy = options.Recreate
+ }
+ return c.ensureService(ctx, project, service, strategy, options.Inherit, options.Timeout)
+ })(ctx)
})
}
-var mu sync.Mutex
-
-// updateProject updates project after service converged, so dependent services relying on `service:xx` can refer to actual containers.
-func (c *convergence) updateProject(project *types.Project, serviceName string) {
- // operation is protected by a Mutex so that we can safely update project.Services while running concurrent convergence on services
- mu.Lock()
- defer mu.Unlock()
-
- cnts := c.getObservedState(serviceName)
- for i, s := range project.Services {
- updateServices(&s, cnts)
- project.Services[i] = s
- }
-}
-
-func updateServices(service *types.ServiceConfig, cnts Containers) {
- if len(cnts) == 0 {
- return
- }
- cnt := cnts[0]
- serviceName := cnt.Labels[api.ServiceLabel]
-
- if d := getDependentServiceFromMode(service.NetworkMode); d == serviceName {
- service.NetworkMode = types.NetworkModeContainerPrefix + cnt.ID
- }
- if d := getDependentServiceFromMode(service.Ipc); d == serviceName {
- service.Ipc = types.NetworkModeContainerPrefix + cnt.ID
- }
- if d := getDependentServiceFromMode(service.Pid); d == serviceName {
- service.Pid = types.NetworkModeContainerPrefix + cnt.ID
+func (c *convergence) ensureService(ctx context.Context, project *types.Project, service types.ServiceConfig, recreate string, inherit bool, timeout *time.Duration) error { //nolint:gocyclo
+ if service.Provider != nil {
+ return c.compose.runPlugin(ctx, project, service, "up")
}
- var links []string
- for _, serviceLink := range service.Links {
- parts := strings.Split(serviceLink, ":")
- serviceName := serviceLink
- serviceAlias := ""
- if len(parts) == 2 {
- serviceName = parts[0]
- serviceAlias = parts[1]
- }
- if serviceName != service.Name {
- links = append(links, serviceLink)
- continue
- }
- for _, container := range cnts {
- name := getCanonicalContainerName(container)
- if serviceAlias != "" {
- links = append(links,
- fmt.Sprintf("%s:%s", name, serviceAlias))
- }
- links = append(links,
- fmt.Sprintf("%s:%s", name, name),
- fmt.Sprintf("%s:%s", name, getContainerNameWithoutProject(container)))
- }
- service.Links = links
- }
-}
-
-func (c *convergence) ensureService(ctx context.Context, project *types.Project, service types.ServiceConfig, recreate string, inherit bool, timeout *time.Duration) error {
expected, err := getScale(service)
if err != nil {
return err
@@ -175,67 +123,99 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
eg, _ := errgroup.WithContext(ctx)
- for i, container := range containers {
+ err = c.resolveServiceReferences(&service)
+ if err != nil {
+ return err
+ }
+
+ sort.Slice(containers, func(i, j int) bool {
+ // select obsolete containers first, so they get removed as we scale down
+ if obsolete, _ := c.mustRecreate(service, containers[i], recreate); obsolete {
+ // i is obsolete, so must be first in the list
+ return true
+ }
+ if obsolete, _ := c.mustRecreate(service, containers[j], recreate); obsolete {
+ // j is obsolete, so must be first in the list
+ return false
+ }
+
+ // For up-to-date containers, sort by container number so the lowest-numbered ones are kept when scaling down
+ ni, erri := strconv.Atoi(containers[i].Labels[api.ContainerNumberLabel])
+ nj, errj := strconv.Atoi(containers[j].Labels[api.ContainerNumberLabel])
+ if erri == nil && errj == nil {
+ return ni > nj
+ }
+
+ // If we can't parse a container number, fall back to sorting by creation date
+ return containers[i].Created < containers[j].Created
+ })
+
+ slices.Reverse(containers)
+ for i, ctr := range containers {
if i >= expected {
// Scale Down
- container := container
- eg.Go(func() error {
- err := c.service.apiClient().ContainerStop(ctx, container.ID, timeout)
- if err != nil {
- return err
- }
- return c.service.apiClient().ContainerRemove(ctx, container.ID, moby.ContainerRemoveOptions{})
- })
+ // As containers are sorted, the obsolete and/or highest-numbered ones will be removed
+ ctr := ctr
+ traceOpts := append(tracing.ServiceOptions(service), tracing.ContainerOptions(ctr)...)
+ eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/scale/down", traceOpts, func(ctx context.Context) error {
+ return c.compose.stopAndRemoveContainer(ctx, ctr, &service, timeout, false)
+ }))
continue
}
- mustRecreate, err := mustRecreate(service, container, recreate)
+ mustRecreate, err := c.mustRecreate(service, ctr, recreate)
if err != nil {
return err
}
if mustRecreate {
- i, container := i, container
- eg.Go(func() error {
- recreated, err := c.service.recreateContainer(ctx, project, service, container, inherit, timeout)
+ err := c.stopDependentContainers(ctx, project, service)
+ if err != nil {
+ return err
+ }
+
+ i, ctr := i, ctr
+ eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(ctr), func(ctx context.Context) error {
+ recreated, err := c.compose.recreateContainer(ctx, project, service, ctr, inherit, timeout)
updated[i] = recreated
return err
- })
+ }))
continue
}
// Enforce non-diverged containers are running
- w := progress.ContextWriter(ctx)
- name := getContainerProgressName(container)
- switch container.State {
- case ContainerRunning:
- w.Event(progress.RunningEvent(name))
- case ContainerCreated:
- case ContainerRestarting:
- case ContainerExited:
- w.Event(progress.CreatedEvent(name))
+ name := getContainerProgressName(ctr)
+ switch ctr.State {
+ case container.StateRunning:
+ c.compose.events.On(runningEvent(name))
+ case container.StateCreated:
+ case container.StateRestarting:
+ case container.StateExited:
default:
- container := container
- eg.Go(func() error {
- return c.service.startContainer(ctx, container)
- })
+ ctr := ctr
+ eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/start", tracing.ContainerOptions(ctr), func(ctx context.Context) error {
+ return c.compose.startContainer(ctx, ctr)
+ }))
}
- updated[i] = container
+ updated[i] = ctr
}
- next, err := nextContainerNumber(containers)
- if err != nil {
- return err
- }
+ next := nextContainerNumber(containers)
for i := 0; i < expected-actual; i++ {
// Scale UP
number := next + i
name := getContainerName(project.Name, service, number)
- i := i
- eg.Go(func() error {
- container, err := c.service.createContainer(ctx, project, service, name, number, false, true, false)
- updated[actual+i] = container
+ eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))}
+ eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error {
+ opts := createOptions{
+ AutoRemove: false,
+ AttachStdin: false,
+ UseNetworkAliases: true,
+ Labels: mergeLabels(service.Labels, service.CustomLabels),
+ }
+ ctr, err := c.compose.createContainer(ctx, project, service, name, number, opts)
+ updated[actual+i] = ctr
return err
- })
+ }))
continue
}
@@ -244,11 +224,113 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
return err
}
-func mustRecreate(expected types.ServiceConfig, actual moby.Container, policy string) (bool, error) {
+func (c *convergence) stopDependentContainers(ctx context.Context, project *types.Project, service types.ServiceConfig) error {
+ // Stop dependent containers, so they will be restarted after service is re-created
+ dependents := project.GetDependentsForService(service, func(dependency types.ServiceDependency) bool {
+ return dependency.Restart
+ })
+ if len(dependents) == 0 {
+ return nil
+ }
+ err := c.compose.stop(ctx, project.Name, api.StopOptions{
+ Services: dependents,
+ Project: project,
+ }, nil)
+ if err != nil {
+ return err
+ }
+
+ for _, name := range dependents {
+ dependentStates := c.getObservedState(name)
+ for i, dependent := range dependentStates {
+ dependent.State = container.StateExited
+ dependentStates[i] = dependent
+ }
+ c.setObservedState(name, dependentStates)
+ }
+ return nil
+}
+
+func getScale(config types.ServiceConfig) (int, error) {
+ scale := config.GetScale()
+ if scale > 1 && config.ContainerName != "" {
+ return 0, fmt.Errorf(doubledContainerNameWarning,
+ config.Name,
+ config.ContainerName)
+ }
+ return scale, nil
+}
+
+// resolveServiceReferences replaces references to other services with references to actual containers
+func (c *convergence) resolveServiceReferences(service *types.ServiceConfig) error {
+ err := c.resolveVolumeFrom(service)
+ if err != nil {
+ return err
+ }
+
+ err = c.resolveSharedNamespaces(service)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *convergence) resolveVolumeFrom(service *types.ServiceConfig) error {
+ for i, vol := range service.VolumesFrom {
+ spec := strings.Split(vol, ":")
+ if len(spec) == 0 {
+ continue
+ }
+ if spec[0] == "container" {
+ service.VolumesFrom[i] = spec[1]
+ continue
+ }
+ name := spec[0]
+ dependencies := c.getObservedState(name)
+ if len(dependencies) == 0 {
+ return fmt.Errorf("cannot share volume with service %s: container missing", name)
+ }
+ service.VolumesFrom[i] = dependencies.sorted()[0].ID
+ }
+ return nil
+}
+
+func (c *convergence) resolveSharedNamespaces(service *types.ServiceConfig) error {
+ str := service.NetworkMode
+ if name := getDependentServiceFromMode(str); name != "" {
+ dependencies := c.getObservedState(name)
+ if len(dependencies) == 0 {
+ return fmt.Errorf("cannot share network namespace with service %s: container missing", name)
+ }
+ service.NetworkMode = types.ContainerPrefix + dependencies.sorted()[0].ID
+ }
+
+ str = service.Ipc
+ if name := getDependentServiceFromMode(str); name != "" {
+ dependencies := c.getObservedState(name)
+ if len(dependencies) == 0 {
+ return fmt.Errorf("cannot share IPC namespace with service %s: container missing", name)
+ }
+ service.Ipc = types.ContainerPrefix + dependencies.sorted()[0].ID
+ }
+
+ str = service.Pid
+ if name := getDependentServiceFromMode(str); name != "" {
+ dependencies := c.getObservedState(name)
+ if len(dependencies) == 0 {
+ return fmt.Errorf("cannot share PID namespace with service %s: container missing", name)
+ }
+ service.Pid = types.ContainerPrefix + dependencies.sorted()[0].ID
+ }
+
+ return nil
+}
+
+func (c *convergence) mustRecreate(expected types.ServiceConfig, actual container.Summary, policy string) (bool, error) {
if policy == api.RecreateNever {
return false, nil
}
- if policy == api.RecreateForce || expected.Extensions[extLifecycle] == forceRecreate {
+ if policy == api.RecreateForce {
return true, nil
}
configHash, err := ServiceHash(expected)
@@ -257,35 +339,119 @@ func mustRecreate(expected types.ServiceConfig, actual moby.Container, policy st
}
configChanged := actual.Labels[api.ConfigHashLabel] != configHash
imageUpdated := actual.Labels[api.ImageDigestLabel] != expected.CustomLabels[api.ImageDigestLabel]
- return configChanged || imageUpdated, nil
+ if configChanged || imageUpdated {
+ return true, nil
+ }
+
+ if c.networks != nil && actual.State == "running" {
+ if checkExpectedNetworks(expected, actual, c.networks) {
+ return true, nil
+ }
+ }
+
+ if c.volumes != nil {
+ if checkExpectedVolumes(expected, actual, c.volumes) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func checkExpectedNetworks(expected types.ServiceConfig, actual container.Summary, networks map[string]string) bool {
+ // check that the networks the container is connected to are the expected ones
+ for net := range expected.Networks {
+ id := networks[net]
+ if id == "swarm" {
+ // corner-case : swarm overlay network isn't visible until a container is attached
+ continue
+ }
+ found := false
+ for _, settings := range actual.NetworkSettings.Networks {
+ if settings.NetworkID == id {
+ found = true
+ break
+ }
+ }
+ if !found {
+ // config is up-to-date but container is not connected to network
+ return true
+ }
+ }
+ return false
+}
+
+func checkExpectedVolumes(expected types.ServiceConfig, actual container.Summary, volumes map[string]string) bool {
+ // check container's volume mounts and search for the expected ones
+ for _, vol := range expected.Volumes {
+ if vol.Type != string(mmount.TypeVolume) {
+ continue
+ }
+ if vol.Source == "" {
+ continue
+ }
+ id := volumes[vol.Source]
+ found := false
+ for _, mount := range actual.Mounts {
+ if mount.Type != mmount.TypeVolume {
+ continue
+ }
+ if mount.Name == id {
+ found = true
+ break
+ }
+ }
+ if !found {
+ // config is up-to-date but container doesn't have volume mounted
+ return true
+ }
+ }
+ return false
}
func getContainerName(projectName string, service types.ServiceConfig, number int) string {
- name := strings.Join([]string{projectName, service.Name, strconv.Itoa(number)}, Separator)
+ name := getDefaultContainerName(projectName, service.Name, strconv.Itoa(number))
if service.ContainerName != "" {
name = service.ContainerName
}
return name
}
-func getContainerProgressName(container moby.Container) string {
- return "Container " + getCanonicalContainerName(container)
+func getDefaultContainerName(projectName, serviceName, index string) string {
+ return strings.Join([]string{projectName, serviceName, index}, api.Separator)
}
-func containerEvents(containers Containers, eventFunc func(string) progress.Event) []progress.Event {
- events := []progress.Event{}
- for _, container := range containers {
- events = append(events, eventFunc(getContainerProgressName(container)))
+func getContainerProgressName(ctr container.Summary) string {
+ return "Container " + getCanonicalContainerName(ctr)
+}
+
+func containerEvents(containers Containers, eventFunc func(string) api.Resource) []api.Resource {
+ events := []api.Resource{}
+ for _, ctr := range containers {
+ events = append(events, eventFunc(getContainerProgressName(ctr)))
}
return events
}
-// ServiceConditionRunningOrHealthy is a service condition on statys running or healthy
+func containerReasonEvents(containers Containers, eventFunc func(string, string) api.Resource, reason string) []api.Resource {
+ events := []api.Resource{}
+ for _, ctr := range containers {
+ events = append(events, eventFunc(getContainerProgressName(ctr), reason))
+ }
+ return events
+}
+
+// ServiceConditionRunningOrHealthy is a service condition on status running or healthy
const ServiceConditionRunningOrHealthy = "running_or_healthy"
-func (s *composeService) waitDependencies(ctx context.Context, project *types.Project, dependencies types.DependsOnConfig) error {
+//nolint:gocyclo
+func (s *composeService) waitDependencies(ctx context.Context, project *types.Project, dependant string, dependencies types.DependsOnConfig, containers Containers, timeout time.Duration) error {
+ if timeout > 0 {
+ withTimeout, cancelFunc := context.WithTimeout(ctx, timeout)
+ defer cancelFunc()
+ ctx = withTimeout
+ }
eg, _ := errgroup.WithContext(ctx)
- w := progress.ContextWriter(ctx)
for dep, config := range dependencies {
if shouldWait, err := shouldWaitForDependency(dep, config, project); err != nil {
return err
@@ -293,48 +459,84 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
continue
}
- containers, err := s.getContainers(ctx, project.Name, oneOffExclude, false, dep)
- if err != nil {
- return err
+ waitingFor := containers.filter(isService(dep), isNotOneOff)
+ s.events.On(containerEvents(waitingFor, waiting)...)
+ if len(waitingFor) == 0 {
+ if config.Required {
+ return fmt.Errorf("%s is missing dependency %s", dependant, dep)
+ }
+ logrus.Warnf("%s is missing dependency %s", dependant, dep)
+ continue
}
- w.Events(containerEvents(containers, progress.Waiting))
- dep, config := dep, config
eg.Go(func() error {
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
for {
- <-ticker.C
+ select {
+ case <-ticker.C:
+ case <-ctx.Done():
+ return nil
+ }
switch config.Condition {
case ServiceConditionRunningOrHealthy:
- healthy, err := s.isServiceHealthy(ctx, project, dep, true)
+ isHealthy, err := s.isServiceHealthy(ctx, waitingFor, true)
if err != nil {
+ if !config.Required {
+ s.events.On(containerReasonEvents(waitingFor, skippedEvent,
+ fmt.Sprintf("optional dependency %q is not running or is unhealthy", dep))...)
+ logrus.Warnf("optional dependency %q is not running or is unhealthy: %s", dep, err.Error())
+ return nil
+ }
return err
}
- if healthy {
- w.Events(containerEvents(containers, progress.Healthy))
+ if isHealthy {
+ s.events.On(containerEvents(waitingFor, healthy)...)
return nil
}
case types.ServiceConditionHealthy:
- healthy, err := s.isServiceHealthy(ctx, project, dep, false)
+ isHealthy, err := s.isServiceHealthy(ctx, waitingFor, false)
if err != nil {
- return err
+ if !config.Required {
+ s.events.On(containerReasonEvents(waitingFor, skippedEvent,
+ fmt.Sprintf("optional dependency %q failed to start", dep))...)
+ logrus.Warnf("optional dependency %q failed to start: %s", dep, err.Error())
+ return nil
+ }
+ s.events.On(containerEvents(waitingFor, func(s string) api.Resource {
+ return errorEventf(s, "dependency %s failed to start", dep)
+ })...)
+ return fmt.Errorf("dependency failed to start: %w", err)
}
- if healthy {
- w.Events(containerEvents(containers, progress.Healthy))
+ if isHealthy {
+ s.events.On(containerEvents(waitingFor, healthy)...)
return nil
}
case types.ServiceConditionCompletedSuccessfully:
- exited, code, err := s.isServiceCompleted(ctx, project, dep)
+ isExited, code, err := s.isServiceCompleted(ctx, waitingFor)
if err != nil {
return err
}
- if exited {
- w.Events(containerEvents(containers, progress.Exited))
- if code != 0 {
- return fmt.Errorf("service %q didn't completed successfully: exit %d", dep, code)
+ if isExited {
+ if code == 0 {
+ s.events.On(containerEvents(waitingFor, exited)...)
+ return nil
}
- return nil
+
+ messageSuffix := fmt.Sprintf("%q didn't complete successfully: exit %d", dep, code)
+ if !config.Required {
+ // optional -> mark as skipped & don't propagate error
+ s.events.On(containerReasonEvents(waitingFor, skippedEvent,
+ fmt.Sprintf("optional dependency %s", messageSuffix))...)
+ logrus.Warnf("optional dependency %s", messageSuffix)
+ return nil
+ }
+
+ msg := fmt.Sprintf("service %s", messageSuffix)
+ s.events.On(containerEvents(waitingFor, func(s string) api.Resource {
+ return errorEventf(s, "service %s", messageSuffix)
+ })...)
+ return errors.New(msg)
}
default:
logrus.Warnf("unsupported depends_on condition: %s", config.Condition)
@@ -343,7 +545,11 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
}
})
}
- return eg.Wait()
+ err := eg.Wait()
+ if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("timeout waiting for dependencies")
+ }
+ return err
}
func shouldWaitForDependency(serviceName string, dependencyConfig types.ServiceDependency, project *types.Project) (bool, error) {
@@ -352,182 +558,213 @@ func shouldWaitForDependency(serviceName string, dependencyConfig types.ServiceD
return false, nil
}
if service, err := project.GetService(serviceName); err != nil {
+ for _, ds := range project.DisabledServices {
+ if ds.Name == serviceName {
+ // don't wait for disabled service (--no-deps)
+ return false, nil
+ }
+ }
return false, err
- } else if service.Scale == 0 {
+ } else if service.GetScale() == 0 {
// don't wait for the dependency which configured to have 0 containers running
return false, nil
+ } else if service.Provider != nil {
+ // don't wait for provider services
+ return false, nil
}
return true, nil
}
-func nextContainerNumber(containers []moby.Container) (int, error) {
- max := 0
+func nextContainerNumber(containers []container.Summary) int {
+ maxNumber := 0
for _, c := range containers {
- n, err := strconv.Atoi(c.Labels[api.ContainerNumberLabel])
+ s, ok := c.Labels[api.ContainerNumberLabel]
+ if !ok {
+ logrus.Warnf("container %s is missing %s label", c.ID, api.ContainerNumberLabel)
+ }
+ n, err := strconv.Atoi(s)
if err != nil {
- return 0, err
+ logrus.Warnf("container %s has invalid %s label: %s", c.ID, api.ContainerNumberLabel, s)
+ continue
}
- if n > max {
- max = n
+ if n > maxNumber {
+ maxNumber = n
}
}
- return max + 1, nil
-
-}
-
-func getScale(config types.ServiceConfig) (int, error) {
- scale := 1
- if config.Deploy != nil && config.Deploy.Replicas != nil {
- scale = int(*config.Deploy.Replicas)
- }
- if scale > 1 && config.ContainerName != "" {
- return 0, fmt.Errorf(doubledContainerNameWarning,
- config.Name,
- config.ContainerName)
- }
- return scale, nil
+ return maxNumber + 1
}
func (s *composeService) createContainer(ctx context.Context, project *types.Project, service types.ServiceConfig,
- name string, number int, autoRemove bool, useNetworkAliases bool, attachStdin bool) (container moby.Container, err error) {
- w := progress.ContextWriter(ctx)
+ name string, number int, opts createOptions,
+) (ctr container.Summary, err error) {
eventName := "Container " + name
- w.Event(progress.CreatingEvent(eventName))
- container, err = s.createMobyContainer(ctx, project, service, name, number, nil, autoRemove, useNetworkAliases, attachStdin)
+ s.events.On(creatingEvent(eventName))
+ ctr, err = s.createMobyContainer(ctx, project, service, name, number, nil, opts)
if err != nil {
- return
+ if ctx.Err() == nil {
+ s.events.On(api.Resource{
+ ID: eventName,
+ Status: api.Error,
+ Text: err.Error(),
+ })
+ }
+ return ctr, err
}
- w.Event(progress.CreatedEvent(eventName))
- return
+ s.events.On(createdEvent(eventName))
+ return ctr, nil
}
func (s *composeService) recreateContainer(ctx context.Context, project *types.Project, service types.ServiceConfig,
- replaced moby.Container, inherit bool, timeout *time.Duration) (moby.Container, error) {
- var created moby.Container
- w := progress.ContextWriter(ctx)
- w.Event(progress.NewEvent(getContainerProgressName(replaced), progress.Working, "Recreate"))
- err := s.apiClient().ContainerStop(ctx, replaced.ID, timeout)
+ replaced container.Summary, inherit bool, timeout *time.Duration,
+) (created container.Summary, err error) {
+ eventName := getContainerProgressName(replaced)
+ s.events.On(newEvent(eventName, api.Working, "Recreate"))
+ defer func() {
+ if err != nil && ctx.Err() == nil {
+ s.events.On(api.Resource{
+ ID: eventName,
+ Status: api.Error,
+ Text: err.Error(),
+ })
+ }
+ }()
+
+ number, err := strconv.Atoi(replaced.Labels[api.ContainerNumberLabel])
if err != nil {
return created, err
}
- name := getCanonicalContainerName(replaced)
+
+ var inherited *container.Summary
+ if inherit {
+ inherited = &replaced
+ }
+
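+ // record the name of the container being replaced; it is attached to the new container via the replace label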
+ replacedContainerName := service.ContainerName
+ if replacedContainerName == "" {
+ replacedContainerName = service.Name + api.Separator + strconv.Itoa(number)
+ }
+ name := getContainerName(project.Name, service, number)
tmpName := fmt.Sprintf("%s_%s", replaced.ID[:12], name)
- err = s.apiClient().ContainerRename(ctx, replaced.ID, tmpName)
+ opts := createOptions{
+ AutoRemove: false,
+ AttachStdin: false,
+ UseNetworkAliases: true,
+ Labels: mergeLabels(service.Labels, service.CustomLabels).Add(api.ContainerReplaceLabel, replacedContainerName),
+ }
+ created, err = s.createMobyContainer(ctx, project, service, tmpName, number, inherited, opts)
if err != nil {
return created, err
}
- number, err := strconv.Atoi(replaced.Labels[api.ContainerNumberLabel])
+
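+ // stop the replaced container, honoring the configured stop timeout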
+ timeoutInSecond := utils.DurationSecondToInt(timeout)
+ err = s.apiClient().ContainerStop(ctx, replaced.ID, container.StopOptions{Timeout: timeoutInSecond})
if err != nil {
return created, err
}
- var inherited *moby.Container
- if inherit {
- inherited = &replaced
- }
- name = getContainerName(project.Name, service, number)
- created, err = s.createMobyContainer(ctx, project, service, name, number, inherited, false, true, false)
+ err = s.apiClient().ContainerRemove(ctx, replaced.ID, container.RemoveOptions{})
if err != nil {
return created, err
}
- err = s.apiClient().ContainerRemove(ctx, replaced.ID, moby.ContainerRemoveOptions{})
+
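+ // now that the replaced container is gone, rename the new container to its canonical name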
+ err = s.apiClient().ContainerRename(ctx, tmpName, name)
if err != nil {
return created, err
}
- w.Event(progress.NewEvent(getContainerProgressName(replaced), progress.Done, "Recreated"))
- setDependentLifecycle(project, service.Name, forceRecreate)
+
+ s.events.On(newEvent(eventName, api.Done, "Recreated"))
return created, err
}
-// setDependentLifecycle define the Lifecycle strategy for all services to depend on specified service
-func setDependentLifecycle(project *types.Project, service string, strategy string) {
- for i, s := range project.Services {
- if utils.StringContains(s.GetDependencies(), service) {
- if s.Extensions == nil {
- s.Extensions = map[string]interface{}{}
- }
- s.Extensions[extLifecycle] = strategy
- project.Services[i] = s
- }
- }
-}
+// force sequential calls to ContainerStart to prevent race condition in engine assigning ports from ranges
+var startMx sync.Mutex
-func (s *composeService) startContainer(ctx context.Context, container moby.Container) error {
- w := progress.ContextWriter(ctx)
- w.Event(progress.NewEvent(getContainerProgressName(container), progress.Working, "Restart"))
- err := s.apiClient().ContainerStart(ctx, container.ID, moby.ContainerStartOptions{})
+func (s *composeService) startContainer(ctx context.Context, ctr container.Summary) error {
+ s.events.On(newEvent(getContainerProgressName(ctr), api.Working, "Restart"))
+ startMx.Lock()
+ defer startMx.Unlock()
+ err := s.apiClient().ContainerStart(ctx, ctr.ID, container.StartOptions{})
if err != nil {
return err
}
- w.Event(progress.NewEvent(getContainerProgressName(container), progress.Done, "Restarted"))
+ s.events.On(newEvent(getContainerProgressName(ctr), api.Done, "Restarted"))
return nil
}
func (s *composeService) createMobyContainer(ctx context.Context, project *types.Project, service types.ServiceConfig,
- name string, number int, inherit *moby.Container, autoRemove bool, useNetworkAliases bool, attachStdin bool) (moby.Container, error) {
- var created moby.Container
- containerConfig, hostConfig, networkingConfig, err := s.getCreateOptions(ctx, project, service, number, inherit, autoRemove, attachStdin)
+ name string, number int, inherit *container.Summary, opts createOptions,
+) (container.Summary, error) {
+ var created container.Summary
+ cfgs, err := s.getCreateConfigs(ctx, project, service, number, inherit, opts)
if err != nil {
return created, err
}
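+ // fall back to DOCKER_DEFAULT_PLATFORM from the project environment when the service does not set a platform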
+ platform := service.Platform
+ if platform == "" {
+ platform = project.Environment["DOCKER_DEFAULT_PLATFORM"]
+ }
var plat *specs.Platform
- if service.Platform != "" {
+ if platform != "" {
var p specs.Platform
- p, err = platforms.Parse(service.Platform)
+ p, err = platforms.Parse(platform)
if err != nil {
return created, err
}
plat = &p
}
- response, err := s.apiClient().ContainerCreate(ctx, containerConfig, hostConfig, networkingConfig, plat, name)
+
+ response, err := s.apiClient().ContainerCreate(ctx, cfgs.Container, cfgs.Host, cfgs.Network, plat, name)
if err != nil {
return created, err
}
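+ // surface any engine warnings returned by the create call as warning events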
+ for _, warning := range response.Warnings {
+ s.events.On(api.Resource{
+ ID: service.Name,
+ Status: api.Warning,
+ Text: warning,
+ })
+ }
inspectedContainer, err := s.apiClient().ContainerInspect(ctx, response.ID)
if err != nil {
return created, err
}
- created = moby.Container{
+ created = container.Summary{
ID: inspectedContainer.ID,
Labels: inspectedContainer.Config.Labels,
Names: []string{inspectedContainer.Name},
- NetworkSettings: &moby.SummaryNetworkSettings{
+ NetworkSettings: &container.NetworkSettingsSummary{
Networks: inspectedContainer.NetworkSettings.Networks,
},
}
- links, err := s.getLinks(ctx, project.Name, service, number)
+
+ apiVersion, err := s.RuntimeVersion(ctx)
if err != nil {
return created, err
}
- for _, netName := range service.NetworksByPriority() {
- netwrk := project.Networks[netName]
- cfg := service.Networks[netName]
- aliases := []string{getContainerName(project.Name, service, number)}
- if useNetworkAliases {
- aliases = append(aliases, service.Name)
- if cfg != nil {
- aliases = append(aliases, cfg.Aliases...)
- }
- }
- if val, ok := created.NetworkSettings.Networks[netwrk.Name]; ok {
- if shortIDAliasExists(created.ID, val.Aliases...) {
+ // Starting API version 1.44, the ContainerCreate API call takes multiple networks
+ // so we include all the configurations there and can skip the one-by-one calls here
+ if versions.LessThan(apiVersion, "1.44") {
+ // the highest-priority network is the primary and is included in the ContainerCreate API
+ // call via container.NetworkMode & network.NetworkingConfig
+ // any remaining networks are connected one-by-one here after creation (but before start)
+ serviceNetworks := service.NetworksByPriority()
+ for _, networkKey := range serviceNetworks {
+ mobyNetworkName := project.Networks[networkKey].Name
+ if string(cfgs.Host.NetworkMode) == mobyNetworkName {
+ // primary network already configured as part of ContainerCreate
continue
}
- err = s.apiClient().NetworkDisconnect(ctx, netwrk.Name, created.ID, false)
- if err != nil {
+ epSettings := createEndpointSettings(project, service, number, networkKey, cfgs.Links, opts.UseNetworkAliases)
+ if err := s.apiClient().NetworkConnect(ctx, mobyNetworkName, created.ID, epSettings); err != nil {
return created, err
}
}
- err = s.connectContainerToNetwork(ctx, created.ID, netwrk.Name, cfg, links, aliases...)
- if err != nil {
- return created, err
- }
}
- return created, err
+ return created, nil
}
// getLinks mimics V1 compose/service.py::Service::_get_links()
-func (s composeService) getLinks(ctx context.Context, projectName string, service types.ServiceConfig, number int) ([]string, error) {
+func (s *composeService) getLinks(ctx context.Context, projectName string, service types.ServiceConfig, number int) ([]string, error) {
var links []string
format := func(k, v string) string {
return fmt.Sprintf("%s:%s", k, v)
@@ -551,8 +788,8 @@ func (s composeService) getLinks(ctx context.Context, projectName string, servic
containerName := getCanonicalContainerName(c)
links = append(links,
format(containerName, linkName),
- format(containerName, strings.Join([]string{linkServiceName, strconv.Itoa(number)}, Separator)),
- format(containerName, strings.Join([]string{projectName, linkServiceName, strconv.Itoa(number)}, Separator)),
+ format(containerName, linkServiceName+api.Separator+strconv.Itoa(number)),
+ format(containerName, strings.Join([]string{projectName, linkServiceName, strconv.Itoa(number)}, api.Separator)),
)
}
}
@@ -566,7 +803,7 @@ func (s composeService) getLinks(ctx context.Context, projectName string, servic
containerName := getCanonicalContainerName(c)
links = append(links,
format(containerName, service.Name),
- format(containerName, strings.TrimPrefix(containerName, projectName+Separator)),
+ format(containerName, strings.TrimPrefix(containerName, projectName+api.Separator)),
format(containerName, containerName),
)
}
@@ -584,139 +821,112 @@ func (s composeService) getLinks(ctx context.Context, projectName string, servic
return links, nil
}
-func shortIDAliasExists(containerID string, aliases ...string) bool {
- for _, alias := range aliases {
- if alias == containerID[:12] {
- return true
- }
- }
- return false
-}
-
-func (s *composeService) connectContainerToNetwork(ctx context.Context, id string, netwrk string, cfg *types.ServiceNetworkConfig, links []string, aliases ...string) error {
- var (
- ipv4Address string
- ipv6Address string
- ipam *network.EndpointIPAMConfig
- )
- if cfg != nil {
- ipv4Address = cfg.Ipv4Address
- ipv6Address = cfg.Ipv6Address
- ipam = &network.EndpointIPAMConfig{
- IPv4Address: ipv4Address,
- IPv6Address: ipv6Address,
- }
- }
- err := s.apiClient().NetworkConnect(ctx, netwrk, id, &network.EndpointSettings{
- Aliases: aliases,
- IPAddress: ipv4Address,
- GlobalIPv6Address: ipv6Address,
- Links: links,
- IPAMConfig: ipam,
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-func (s *composeService) isServiceHealthy(ctx context.Context, project *types.Project, service string, fallbackRunning bool) (bool, error) {
- containers, err := s.getContainers(ctx, project.Name, oneOffExclude, false, service)
- if err != nil {
- return false, err
- }
-
- if len(containers) == 0 {
- return false, nil
- }
+func (s *composeService) isServiceHealthy(ctx context.Context, containers Containers, fallbackRunning bool) (bool, error) {
for _, c := range containers {
- container, err := s.apiClient().ContainerInspect(ctx, c.ID)
+ ctr, err := s.apiClient().ContainerInspect(ctx, c.ID)
if err != nil {
return false, err
}
- if container.Config.Healthcheck == nil && fallbackRunning {
+ name := ctr.Name[1:]
+
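+ // an exited container can never become healthy, so fail fast with its exit code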
+ if ctr.State.Status == container.StateExited {
+ return false, fmt.Errorf("container %s exited (%d)", name, ctr.State.ExitCode)
+ }
+
+ if ctr.Config.Healthcheck == nil && fallbackRunning {
// Container does not define a health check, but we can fall back to "running" state
- return container.State != nil && container.State.Status == "running", nil
+ return ctr.State != nil && ctr.State.Status == container.StateRunning, nil
}
- if container.State == nil || container.State.Health == nil {
- return false, fmt.Errorf("container for service %q has no healthcheck configured", service)
+ if ctr.State == nil || ctr.State.Health == nil {
+ return false, fmt.Errorf("container %s has no healthcheck configured", name)
}
- switch container.State.Health.Status {
- case moby.Healthy:
+ switch ctr.State.Health.Status {
+ case container.Healthy:
// Continue by checking the next container.
- case moby.Unhealthy:
- return false, fmt.Errorf("container for service %q is unhealthy", service)
- case moby.Starting:
+ case container.Unhealthy:
+ return false, fmt.Errorf("container %s is unhealthy", name)
+ case container.Starting:
return false, nil
default:
- return false, fmt.Errorf("container for service %q had unexpected health status %q", service, container.State.Health.Status)
+ return false, fmt.Errorf("container %s had unexpected health status %q", name, ctr.State.Health.Status)
}
}
return true, nil
}
-func (s *composeService) isServiceCompleted(ctx context.Context, project *types.Project, dep string) (bool, int, error) {
- containers, err := s.getContainers(ctx, project.Name, oneOffExclude, true, dep)
- if err != nil {
- return false, 0, err
- }
+func (s *composeService) isServiceCompleted(ctx context.Context, containers Containers) (bool, int, error) {
for _, c := range containers {
- container, err := s.apiClient().ContainerInspect(ctx, c.ID)
+ ctr, err := s.apiClient().ContainerInspect(ctx, c.ID)
if err != nil {
return false, 0, err
}
- if container.State != nil && container.State.Status == "exited" {
- return true, container.State.ExitCode, nil
+ if ctr.State != nil && ctr.State.Status == container.StateExited {
+ return true, ctr.State.ExitCode, nil
}
}
return false, 0, nil
}
-func (s *composeService) startService(ctx context.Context, project *types.Project, service types.ServiceConfig) error {
+func (s *composeService) startService(ctx context.Context,
+ project *types.Project, service types.ServiceConfig,
+ containers Containers, listener api.ContainerEventListener,
+ timeout time.Duration,
+) error {
if service.Deploy != nil && service.Deploy.Replicas != nil && *service.Deploy.Replicas == 0 {
return nil
}
- err := s.waitDependencies(ctx, project, service.DependsOn)
- if err != nil {
- return err
- }
- containers, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(
- projectFilter(project.Name),
- serviceFilter(service.Name),
- oneOffFilter(false),
- ),
- All: true,
- })
+ err := s.waitDependencies(ctx, project, service.Name, service.DependsOn, containers, timeout)
if err != nil {
return err
}
if len(containers) == 0 {
- if scale, err := getScale(service); err != nil && scale == 0 {
+ if service.GetScale() == 0 {
return nil
}
return fmt.Errorf("service %q has no container to start", service.Name)
}
- w := progress.ContextWriter(ctx)
- eg, ctx := errgroup.WithContext(ctx)
- for _, container := range containers {
- if container.State == ContainerRunning {
+ for _, ctr := range containers.filter(isService(service.Name)) {
+ if ctr.State == container.StateRunning {
continue
}
- container := container
- eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.StartingEvent(eventName))
- err := s.apiClient().ContainerStart(ctx, container.ID, moby.ContainerStartOptions{})
- if err == nil {
- w.Event(progress.StartedEvent(eventName))
- }
+
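+ // inject secrets and configs into the container before starting it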
+ err = s.injectSecrets(ctx, project, service, ctr.ID)
+ if err != nil {
return err
- })
+ }
+
+ err = s.injectConfigs(ctx, project, service, ctr.ID)
+ if err != nil {
+ return err
+ }
+
+ eventName := getContainerProgressName(ctr)
+ s.events.On(startingEvent(eventName))
+ err = s.apiClient().ContainerStart(ctx, ctr.ID, container.StartOptions{})
+ if err != nil {
+ return err
+ }
+
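+ // run post_start hooks once the container has been started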
+ for _, hook := range service.PostStart {
+ err = s.runHook(ctx, ctr, service, hook, listener)
+ if err != nil {
+ return err
+ }
+ }
+
+ s.events.On(startedEvent(eventName))
+ }
+ return nil
+}
+
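+// mergeLabels merges label sets into a single map; later sets win on duplicate keys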
+func mergeLabels(ls ...types.Labels) types.Labels {
+ merged := types.Labels{}
+ for _, l := range ls {
+ maps.Copy(merged, l)
}
- return eg.Wait()
+ return merged
}
diff --git a/pkg/compose/convergence_test.go b/pkg/compose/convergence_test.go
index bd41db30a5d..637be02961a 100644
--- a/pkg/compose/convergence_test.go
+++ b/pkg/compose/convergence_test.go
@@ -22,51 +22,60 @@ import (
"strings"
"testing"
- "github.com/compose-spec/compose-go/types"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/mocks"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/config/configfile"
moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
- "github.com/golang/mock/gomock"
- "gotest.tools/assert"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/go-connections/nat"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/mocks"
)
func TestContainerName(t *testing.T) {
s := types.ServiceConfig{
Name: "testservicename",
ContainerName: "testcontainername",
- Scale: 1,
+ Scale: intPtr(1),
Deploy: &types.DeployConfig{},
}
ret, err := getScale(s)
assert.NilError(t, err)
- assert.Equal(t, ret, s.Scale)
+ assert.Equal(t, ret, *s.Scale)
- var zero uint64 // = 0
- s.Deploy.Replicas = &zero
+ s.Scale = intPtr(0)
ret, err = getScale(s)
assert.NilError(t, err)
- assert.Equal(t, ret, int(*s.Deploy.Replicas))
+ assert.Equal(t, ret, *s.Scale)
- var two uint64 = 2
- s.Deploy.Replicas = &two
+ s.Scale = intPtr(2)
_, err = getScale(s)
assert.Error(t, err, fmt.Sprintf(doubledContainerNameWarning, s.Name, s.ContainerName))
}
+func intPtr(i int) *int {
+ return &i
+}
+
func TestServiceLinks(t *testing.T) {
const dbContainerName = "/" + testProject + "-db-1"
const webContainerName = "/" + testProject + "-web-1"
s := types.ServiceConfig{
Name: "web",
- Scale: 1,
+ Scale: intPtr(1),
}
- containerListOptions := moby.ContainerListOptions{
+ containerListOptions := container.ListOptions{
Filters: filters.NewArgs(
projectFilter(testProject),
serviceFilter("db"),
oneOffFilter(false),
+ hasConfigHashLabel(),
),
All: true,
}
@@ -77,15 +86,16 @@ func TestServiceLinks(t *testing.T) {
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
s.Links = []string{"db"}
c := testContainer("db", dbContainerName, false)
- apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]moby.Container{c}, nil)
+ apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]container.Summary{c}, nil)
- links, err := tested.getLinks(context.Background(), testProject, s, 1)
+ links, err := tested.(*composeService).getLinks(context.Background(), testProject, s, 1)
assert.NilError(t, err)
assert.Equal(t, len(links), 3)
@@ -99,15 +109,16 @@ func TestServiceLinks(t *testing.T) {
defer mockCtrl.Finish()
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
s.Links = []string{"db:db"}
c := testContainer("db", dbContainerName, false)
- apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]moby.Container{c}, nil)
- links, err := tested.getLinks(context.Background(), testProject, s, 1)
+ apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]container.Summary{c}, nil)
+ links, err := tested.(*composeService).getLinks(context.Background(), testProject, s, 1)
assert.NilError(t, err)
assert.Equal(t, len(links), 3)
@@ -121,15 +132,16 @@ func TestServiceLinks(t *testing.T) {
defer mockCtrl.Finish()
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
s.Links = []string{"db:dbname"}
c := testContainer("db", dbContainerName, false)
- apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]moby.Container{c}, nil)
+ apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]container.Summary{c}, nil)
- links, err := tested.getLinks(context.Background(), testProject, s, 1)
+ links, err := tested.(*composeService).getLinks(context.Background(), testProject, s, 1)
assert.NilError(t, err)
assert.Equal(t, len(links), 3)
@@ -143,16 +155,17 @@ func TestServiceLinks(t *testing.T) {
defer mockCtrl.Finish()
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
s.Links = []string{"db:dbname"}
s.ExternalLinks = []string{"db1:db2"}
c := testContainer("db", dbContainerName, false)
- apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]moby.Container{c}, nil)
+ apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptions).Return([]container.Summary{c}, nil)
- links, err := tested.getLinks(context.Background(), testProject, s, 1)
+ links, err := tested.(*composeService).getLinks(context.Background(), testProject, s, 1)
assert.NilError(t, err)
assert.Equal(t, len(links), 4)
@@ -169,7 +182,8 @@ func TestServiceLinks(t *testing.T) {
defer mockCtrl.Finish()
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
s.Links = []string{}
@@ -177,17 +191,18 @@ func TestServiceLinks(t *testing.T) {
s.Labels = s.Labels.Add(api.OneoffLabel, "True")
c := testContainer("web", webContainerName, true)
- containerListOptionsOneOff := moby.ContainerListOptions{
+ containerListOptionsOneOff := container.ListOptions{
Filters: filters.NewArgs(
projectFilter(testProject),
serviceFilter("web"),
oneOffFilter(false),
+ hasConfigHashLabel(),
),
All: true,
}
- apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptionsOneOff).Return([]moby.Container{c}, nil)
+ apiClient.EXPECT().ContainerList(gomock.Any(), containerListOptionsOneOff).Return([]container.Summary{c}, nil)
- links, err := tested.getLinks(context.Background(), testProject, s, 1)
+ links, err := tested.(*composeService).getLinks(context.Background(), testProject, s, 1)
assert.NilError(t, err)
assert.Equal(t, len(links), 3)
@@ -203,27 +218,210 @@ func TestWaitDependencies(t *testing.T) {
apiClient := mocks.NewMockAPIClient(mockCtrl)
cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
cli.EXPECT().Client().Return(apiClient).AnyTimes()
t.Run("should skip dependencies with scale 0", func(t *testing.T) {
- dbService := types.ServiceConfig{Name: "db", Scale: 0}
- redisService := types.ServiceConfig{Name: "redis", Scale: 0}
- project := types.Project{Name: strings.ToLower(testProject), Services: []types.ServiceConfig{dbService, redisService}}
+ dbService := types.ServiceConfig{Name: "db", Scale: intPtr(0)}
+ redisService := types.ServiceConfig{Name: "redis", Scale: intPtr(0)}
+ project := types.Project{Name: strings.ToLower(testProject), Services: types.Services{
+ "db": dbService,
+ "redis": redisService,
+ }}
dependencies := types.DependsOnConfig{
"db": {Condition: ServiceConditionRunningOrHealthy},
"redis": {Condition: ServiceConditionRunningOrHealthy},
}
- assert.NilError(t, tested.waitDependencies(context.Background(), &project, dependencies))
+ assert.NilError(t, tested.(*composeService).waitDependencies(context.Background(), &project, "", dependencies, nil, 0))
})
t.Run("should skip dependencies with condition service_started", func(t *testing.T) {
- dbService := types.ServiceConfig{Name: "db", Scale: 1}
- redisService := types.ServiceConfig{Name: "redis", Scale: 1}
- project := types.Project{Name: strings.ToLower(testProject), Services: []types.ServiceConfig{dbService, redisService}}
+ dbService := types.ServiceConfig{Name: "db", Scale: intPtr(1)}
+ redisService := types.ServiceConfig{Name: "redis", Scale: intPtr(1)}
+ project := types.Project{Name: strings.ToLower(testProject), Services: types.Services{
+ "db": dbService,
+ "redis": redisService,
+ }}
dependencies := types.DependsOnConfig{
- "db": {Condition: types.ServiceConditionStarted},
- "redis": {Condition: types.ServiceConditionStarted},
+ "db": {Condition: types.ServiceConditionStarted, Required: true},
+ "redis": {Condition: types.ServiceConditionStarted, Required: true},
}
- assert.NilError(t, tested.waitDependencies(context.Background(), &project, dependencies))
+ assert.NilError(t, tested.(*composeService).waitDependencies(context.Background(), &project, "", dependencies, nil, 0))
+ })
+}
+
+func TestCreateMobyContainer(t *testing.T) {
+ t.Run("connects container networks one by one if API <1.44", func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ apiClient := mocks.NewMockAPIClient(mockCtrl)
+ cli := mocks.NewMockCli(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+ cli.EXPECT().Client().Return(apiClient).AnyTimes()
+ cli.EXPECT().ConfigFile().Return(&configfile.ConfigFile{}).AnyTimes()
+ apiClient.EXPECT().DaemonHost().Return("").AnyTimes()
+ apiClient.EXPECT().ImageInspect(gomock.Any(), gomock.Any()).Return(image.InspectResponse{}, nil).AnyTimes()
+ // force `RuntimeVersion` to fetch again
+ runtimeVersion = runtimeVersionCache{}
+ apiClient.EXPECT().ServerVersion(gomock.Any()).Return(moby.Version{
+ APIVersion: "1.43",
+ }, nil).AnyTimes()
+
+ service := types.ServiceConfig{
+ Name: "test",
+ Networks: map[string]*types.ServiceNetworkConfig{
+ "a": {
+ Priority: 10,
+ },
+ "b": {
+ Priority: 100,
+ },
+ },
+ }
+ project := types.Project{
+ Name: "bork",
+ Services: types.Services{
+ "test": service,
+ },
+ Networks: types.Networks{
+ "a": types.NetworkConfig{
+ Name: "a-moby-name",
+ },
+ "b": types.NetworkConfig{
+ Name: "b-moby-name",
+ },
+ },
+ }
+
+ var falseBool bool
+ apiClient.EXPECT().ContainerCreate(gomock.Any(), gomock.Any(), gomock.Eq(
+ &container.HostConfig{
+ PortBindings: nat.PortMap{},
+ ExtraHosts: []string{},
+ Tmpfs: map[string]string{},
+ Resources: container.Resources{
+ OomKillDisable: &falseBool,
+ },
+ NetworkMode: "b-moby-name",
+ }), gomock.Eq(
+ &network.NetworkingConfig{
+ EndpointsConfig: map[string]*network.EndpointSettings{
+ "b-moby-name": {
+ IPAMConfig: &network.EndpointIPAMConfig{},
+ Aliases: []string{"bork-test-0"},
+ },
+ },
+ }), gomock.Any(), gomock.Any()).Times(1).Return(
+ container.CreateResponse{
+ ID: "an-id",
+ }, nil)
+
+ apiClient.EXPECT().ContainerInspect(gomock.Any(), gomock.Eq("an-id")).Times(1).Return(
+ container.InspectResponse{
+ ContainerJSONBase: &container.ContainerJSONBase{
+ ID: "an-id",
+ Name: "a-name",
+ },
+ Config: &container.Config{},
+ NetworkSettings: &container.NetworkSettings{},
+ }, nil)
+
+ apiClient.EXPECT().NetworkConnect(gomock.Any(), "a-moby-name", "an-id", gomock.Eq(
+ &network.EndpointSettings{
+ IPAMConfig: &network.EndpointIPAMConfig{},
+ Aliases: []string{"bork-test-0"},
+ }))
+
+ _, err = tested.(*composeService).createMobyContainer(context.Background(), &project, service, "test", 0, nil, createOptions{
+ Labels: make(types.Labels),
+ })
+ assert.NilError(t, err)
+ })
+
+ t.Run("includes all container networks in ContainerCreate call if API >=1.44", func(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ apiClient := mocks.NewMockAPIClient(mockCtrl)
+ cli := mocks.NewMockCli(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+ cli.EXPECT().Client().Return(apiClient).AnyTimes()
+ cli.EXPECT().ConfigFile().Return(&configfile.ConfigFile{}).AnyTimes()
+ apiClient.EXPECT().DaemonHost().Return("").AnyTimes()
+ apiClient.EXPECT().ImageInspect(gomock.Any(), gomock.Any()).Return(image.InspectResponse{}, nil).AnyTimes()
+ // force `RuntimeVersion` to fetch fresh version
+ runtimeVersion = runtimeVersionCache{}
+ apiClient.EXPECT().ServerVersion(gomock.Any()).Return(moby.Version{
+ APIVersion: "1.44",
+ }, nil).AnyTimes()
+
+ service := types.ServiceConfig{
+ Name: "test",
+ Networks: map[string]*types.ServiceNetworkConfig{
+ "a": {
+ Priority: 10,
+ },
+ "b": {
+ Priority: 100,
+ },
+ },
+ }
+ project := types.Project{
+ Name: "bork",
+ Services: types.Services{
+ "test": service,
+ },
+ Networks: types.Networks{
+ "a": types.NetworkConfig{
+ Name: "a-moby-name",
+ },
+ "b": types.NetworkConfig{
+ Name: "b-moby-name",
+ },
+ },
+ }
+
+ var falseBool bool
+ apiClient.EXPECT().ContainerCreate(gomock.Any(), gomock.Any(), gomock.Eq(
+ &container.HostConfig{
+ PortBindings: nat.PortMap{},
+ ExtraHosts: []string{},
+ Tmpfs: map[string]string{},
+ Resources: container.Resources{
+ OomKillDisable: &falseBool,
+ },
+ NetworkMode: "b-moby-name",
+ }), gomock.Eq(
+ &network.NetworkingConfig{
+ EndpointsConfig: map[string]*network.EndpointSettings{
+ "a-moby-name": {
+ IPAMConfig: &network.EndpointIPAMConfig{},
+ Aliases: []string{"bork-test-0"},
+ },
+ "b-moby-name": {
+ IPAMConfig: &network.EndpointIPAMConfig{},
+ Aliases: []string{"bork-test-0"},
+ },
+ },
+ }), gomock.Any(), gomock.Any()).Times(1).Return(
+ container.CreateResponse{
+ ID: "an-id",
+ }, nil)
+
+ apiClient.EXPECT().ContainerInspect(gomock.Any(), gomock.Eq("an-id")).Times(1).Return(
+ container.InspectResponse{
+ ContainerJSONBase: &container.ContainerJSONBase{
+ ID: "an-id",
+ Name: "a-name",
+ },
+ Config: &container.Config{},
+ NetworkSettings: &container.NetworkSettings{},
+ }, nil)
+
+ _, err = tested.(*composeService).createMobyContainer(context.Background(), &project, service, "test", 0, nil, createOptions{
+ Labels: make(types.Labels),
+ })
+ assert.NilError(t, err)
})
}
diff --git a/pkg/compose/convert.go b/pkg/compose/convert.go
index 6f092ee2d51..17d5a901869 100644
--- a/pkg/compose/convert.go
+++ b/pkg/compose/convert.go
@@ -17,11 +17,14 @@
package compose
import (
+ "context"
+ "errors"
"fmt"
"time"
- compose "github.com/compose-spec/compose-go/types"
+ compose "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
)
// ToMobyEnv convert into []string
@@ -38,9 +41,9 @@ func ToMobyEnv(environment compose.MappingWithEquals) []string {
}
// ToMobyHealthCheck convert into container.HealthConfig
-func ToMobyHealthCheck(check *compose.HealthCheckConfig) *container.HealthConfig {
+func (s *composeService) ToMobyHealthCheck(ctx context.Context, check *compose.HealthCheckConfig) (*container.HealthConfig, error) {
if check == nil {
- return nil
+ return nil, nil
}
var (
interval time.Duration
@@ -64,13 +67,30 @@ func ToMobyHealthCheck(check *compose.HealthCheckConfig) *container.HealthConfig
if check.Disable {
test = []string{"NONE"}
}
- return &container.HealthConfig{
- Test: test,
- Interval: interval,
- Timeout: timeout,
- StartPeriod: period,
- Retries: retries,
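+ // healthcheck.start_interval requires Engine API 1.44 (Docker Engine v25) or later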
+ var startInterval time.Duration
+ if check.StartInterval != nil {
+ version, err := s.RuntimeVersion(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if versions.LessThan(version, "1.44") {
+ return nil, errors.New("can't set healthcheck.start_interval as feature require Docker Engine v25 or later")
+ } else {
+ startInterval = time.Duration(*check.StartInterval)
+ }
+ if check.StartPeriod == nil {
+ // see https://github.com/moby/moby/issues/48874
+ return nil, errors.New("healthcheck.start_interval requires healthcheck.start_period to be set")
+ }
}
+ return &container.HealthConfig{
+ Test: test,
+ Interval: interval,
+ Timeout: timeout,
+ StartPeriod: period,
+ StartInterval: startInterval,
+ Retries: retries,
+ }, nil
}
// ToSeconds convert into seconds
diff --git a/pkg/compose/cp.go b/pkg/compose/cp.go
index b2123e601ab..f236a2a1a77 100644
--- a/pkg/compose/cp.go
+++ b/pkg/compose/cp.go
@@ -18,6 +18,7 @@ package compose
import (
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -27,11 +28,9 @@ import (
"golang.org/x/sync/errgroup"
"github.com/docker/cli/cli/command"
- "github.com/docker/compose/v2/pkg/api"
- moby "github.com/docker/docker/api/types"
- "github.com/docker/docker/pkg/archive"
- "github.com/docker/docker/pkg/system"
- "github.com/pkg/errors"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+ "github.com/moby/go-archive"
)
type copyDirection int
@@ -42,59 +41,106 @@ const (
acrossServices = fromService | toService
)
-func (s *composeService) Copy(ctx context.Context, project string, opts api.CopyOptions) error {
- srcService, srcPath := splitCpArg(opts.Source)
- destService, dstPath := splitCpArg(opts.Destination)
+func (s *composeService) Copy(ctx context.Context, projectName string, options api.CopyOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.copy(ctx, projectName, options)
+ }, "copy", s.events)
+}
+
+func (s *composeService) copy(ctx context.Context, projectName string, options api.CopyOptions) error {
+ projectName = strings.ToLower(projectName)
+ srcService, srcPath := splitCpArg(options.Source)
+ destService, dstPath := splitCpArg(options.Destination)
var direction copyDirection
var serviceName string
+ var copyFunc func(ctx context.Context, containerID string, srcPath string, dstPath string, opts api.CopyOptions) error
if srcService != "" {
direction |= fromService
serviceName = srcService
-
- // copying from multiple containers of a services doesn't make sense.
- if opts.All {
- return errors.New("cannot use the --all flag when copying from a service")
- }
+ copyFunc = s.copyFromContainer
}
if destService != "" {
direction |= toService
serviceName = destService
+ copyFunc = s.copyToContainer
}
-
- containers, err := s.getContainers(ctx, project, oneOffExclude, true, serviceName)
- if err != nil {
- return err
+ if direction == acrossServices {
+ return errors.New("copying between services is not supported")
}
- if len(containers) < 1 {
- return fmt.Errorf("no container found for service %q", serviceName)
+ if direction == 0 {
+ return errors.New("unknown copy direction")
}
- if !opts.All {
- containers = containers.filter(indexed(opts.Index))
+ containers, err := s.listContainersTargetedForCopy(ctx, projectName, options, direction, serviceName)
+ if err != nil {
+ return err
}
g := errgroup.Group{}
- for _, container := range containers {
- containerID := container.ID
+ for _, cont := range containers {
+ ctr := cont
g.Go(func() error {
- switch direction {
- case fromService:
- return s.copyFromContainer(ctx, containerID, srcPath, dstPath, opts)
- case toService:
- return s.copyToContainer(ctx, containerID, srcPath, dstPath, opts)
- case acrossServices:
- return errors.New("copying between services is not supported")
- default:
- return errors.New("unknown copy direction")
+ name := getCanonicalContainerName(ctr)
+ var msg string
+ if direction == fromService {
+ msg = fmt.Sprintf("%s:%s to %s", name, srcPath, dstPath)
+ } else {
+ msg = fmt.Sprintf("%s to %s:%s", srcPath, name, dstPath)
+ }
+ s.events.On(api.Resource{
+ ID: name,
+ Text: api.StatusCopying,
+ Details: msg,
+ Status: api.Working,
+ })
+ if err := copyFunc(ctx, ctr.ID, srcPath, dstPath, options); err != nil {
+ return err
}
+ s.events.On(api.Resource{
+ ID: name,
+ Text: api.StatusCopied,
+ Details: msg,
+ Status: api.Done,
+ })
+ return nil
})
}
return g.Wait()
}
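+// listContainersTargetedForCopy selects the containers a copy operation applies to,
+// honoring the Index and All copy options.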
+func (s *composeService) listContainersTargetedForCopy(ctx context.Context, projectName string, options api.CopyOptions, direction copyDirection, serviceName string) (Containers, error) {
+ var containers Containers
+ var err error
+ switch {
+ case options.Index > 0:
+ ctr, err := s.getSpecifiedContainer(ctx, projectName, oneOffExclude, true, serviceName, options.Index)
+ if err != nil {
+ return nil, err
+ }
+ return append(containers, ctr), nil
+ default:
+ withOneOff := oneOffExclude
+ if options.All {
+ withOneOff = oneOffInclude
+ }
+ containers, err = s.getContainers(ctx, projectName, withOneOff, true, serviceName)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(containers) < 1 {
+ return nil, fmt.Errorf("no container found for service %q", serviceName)
+ }
+ if direction == fromService {
+ return containers[:1], err
+ }
+ return containers, err
+ }
+}
+
func (s *composeService) copyToContainer(ctx context.Context, containerID string, srcPath string, dstPath string, opts api.CopyOptions) error {
var err error
if srcPath != "-" {
@@ -112,7 +158,7 @@ func (s *composeService) copyToContainer(ctx context.Context, containerID string
// If the destination is a symbolic link, we should evaluate it.
if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
linkTarget := dstStat.LinkTarget
- if !system.IsAbs(linkTarget) {
+ if !isAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := archive.SplitPathDirEntry(dstPath)
linkTarget = filepath.Join(dstParent, linkTarget)
@@ -124,7 +170,7 @@ func (s *composeService) copyToContainer(ctx context.Context, containerID string
// Validate the destination path
if err := command.ValidateOutputPathFileMode(dstStat.Mode); err != nil {
- return errors.Wrapf(err, `destination "%s:%s" must be a directory or a regular file`, containerID, dstPath)
+ return fmt.Errorf(`destination "%s:%s" must be a directory or a regular file: %w`, containerID, dstPath, err)
}
// Ignore any error and assume that the parent directory of the destination
@@ -146,7 +192,7 @@ func (s *composeService) copyToContainer(ctx context.Context, containerID string
content = s.stdin()
resolvedDstPath = dstInfo.Path
if !dstInfo.IsDir {
- return errors.Errorf("destination \"%s:%s\" must be a directory", containerID, dstPath)
+ return fmt.Errorf("destination \"%s:%s\" must be a directory", containerID, dstPath)
}
} else {
// Prepare source copy info.
@@ -173,17 +219,20 @@ func (s *composeService) copyToContainer(ctx context.Context, containerID string
// extracted. This function also infers from the source and destination
// info which directory to extract to, which may be the parent of the
// destination that the user specified.
- dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
- if err != nil {
- return err
- }
- defer preparedArchive.Close() //nolint:errcheck
+ // Don't create the archive if running in Dry Run mode
+ if !s.dryRun {
+ dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer preparedArchive.Close() //nolint:errcheck
- resolvedDstPath = dstDir
- content = preparedArchive
+ resolvedDstPath = dstDir
+ content = preparedArchive
+ }
}
- options := moby.CopyToContainerOptions{
+ options := container.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
CopyUIDGID: opts.CopyUIDGID,
}
@@ -212,7 +261,7 @@ func (s *composeService) copyFromContainer(ctx context.Context, containerID, src
// If the destination is a symbolic link, we should follow it.
if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
linkTarget := srcStat.LinkTarget
- if !system.IsAbs(linkTarget) {
+ if !isAbs(linkTarget) {
// Join with the parent directory.
srcParent, _ := archive.SplitPathDirEntry(srcPath)
linkTarget = filepath.Join(srcParent, linkTarget)
@@ -242,7 +291,7 @@ func (s *composeService) copyFromContainer(ctx context.Context, containerID, src
}
preArchive := content
- if len(srcInfo.RebaseName) != 0 {
+ if srcInfo.RebaseName != "" {
_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
}
@@ -250,8 +299,20 @@ func (s *composeService) copyFromContainer(ctx context.Context, containerID, src
return archive.CopyTo(preArchive, srcInfo, dstPath)
}
-func splitCpArg(arg string) (container, path string) {
- if system.IsAbs(arg) {
+// IsAbs is a platform-agnostic wrapper for filepath.IsAbs.
+//
+// On Windows, golang filepath.IsAbs does not consider a path \windows\system32
+// as absolute as it doesn't start with a drive-letter/colon combination. However,
+// in docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func isAbs(path string) bool {
+ return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator))
+}
+
+func splitCpArg(arg string) (ctr, path string) {
+ if isAbs(arg) {
// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
return "", arg
}
@@ -269,7 +330,7 @@ func splitCpArg(arg string) (container, path string) {
func resolveLocalPath(localPath string) (absPath string, err error) {
if absPath, err = filepath.Abs(localPath); err != nil {
- return
+ return absPath, err
}
- return archive.PreserveTrailingDotOrSeparator(absPath, localPath, filepath.Separator), nil
+ return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
}
diff --git a/pkg/compose/create.go b/pkg/compose/create.go
index 637d4c5ea3f..59dc568714d 100644
--- a/pkg/compose/create.go
+++ b/pkg/compose/create.go
@@ -20,36 +20,49 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
- "path"
+ "os"
"path/filepath"
+ "slices"
"strconv"
"strings"
- "github.com/compose-spec/compose-go/types"
- moby "github.com/docker/docker/api/types"
+ "github.com/compose-spec/compose-go/v2/paths"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/strslice"
- volume_api "github.com/docker/docker/api/types/volume"
- "github.com/docker/docker/errdefs"
+ "github.com/docker/docker/api/types/versions"
+ volumetypes "github.com/docker/docker/api/types/volume"
"github.com/docker/go-connections/nat"
- "github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ cdi "tags.cncf.io/container-device-interface/pkg/parser"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Create(ctx context.Context, project *types.Project, options api.CreateOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.create(ctx, project, options)
- })
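+// createOptions carries the per-container settings applied when creating a service container.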
+type createOptions struct {
+ AutoRemove bool
+ AttachStdin bool
+ UseNetworkAliases bool
+ Labels types.Labels
+}
+
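+// createConfigs bundles the engine-side container, host, and network configuration for a
+// single create call, plus the computed legacy links.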
+type createConfigs struct {
+ Container *container.Config
+ Host *container.HostConfig
+ Network *network.NetworkingConfig
+ Links []string
+}
+
+func (s *composeService) Create(ctx context.Context, project *types.Project, createOpts api.CreateOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.create(ctx, project, createOpts)
+ }, "create", s.events)
}
func (s *composeService) create(ctx context.Context, project *types.Project, options api.CreateOptions) error {
@@ -57,42 +70,42 @@ func (s *composeService) create(ctx context.Context, project *types.Project, opt
options.Services = project.ServiceNames()
}
- var observedState Containers
- observedState, err := s.getContainers(ctx, project.Name, oneOffInclude, true)
+ err := project.CheckContainerNameUnicity()
if err != nil {
return err
}
- err = s.ensureImagesExists(ctx, project, options.QuietPull)
+ err = s.ensureImagesExists(ctx, project, options.Build, options.QuietPull)
if err != nil {
return err
}
- prepareNetworks(project)
-
- err = prepareVolumes(project)
+ err = s.ensureModels(ctx, project, options.QuietPull)
if err != nil {
return err
}
- if err := s.ensureNetworks(ctx, project.Networks); err != nil {
+ prepareNetworks(project)
+
+ networks, err := s.ensureNetworks(ctx, project)
+ if err != nil {
return err
}
- if err := s.ensureProjectVolumes(ctx, project); err != nil {
+ volumes, err := s.ensureProjectVolumes(ctx, project)
+ if err != nil {
return err
}
- allServices := project.AllServices()
- allServiceNames := []string{}
- for _, service := range allServices {
- allServiceNames = append(allServiceNames, service.Name)
+ var observedState Containers
+ observedState, err = s.getContainers(ctx, project.Name, oneOffInclude, true)
+ if err != nil {
+ return err
}
- orphans := observedState.filter(isNotService(allServiceNames...))
+ orphans := observedState.filter(isOrphaned(project))
if len(orphans) > 0 && !options.IgnoreOrphans {
if options.RemoveOrphans {
- w := progress.ContextWriter(ctx)
- err := s.removeContainers(ctx, w, orphans, nil, false)
+ err := s.removeContainers(ctx, orphans, nil, nil, false)
if err != nil {
return err
}
@@ -104,145 +117,73 @@ func (s *composeService) create(ctx context.Context, project *types.Project, opt
}
}
- err = prepareServicesDependsOn(project)
+ // Temporary implementation of use_api_socket until the Docker Engine provides native support
+ project, err = s.useAPISocket(project)
if err != nil {
return err
}
- return newConvergence(options.Services, observedState, s).apply(ctx, project, options)
-}
-
-func prepareVolumes(p *types.Project) error {
- for i := range p.Services {
- volumesFrom, dependServices, err := getVolumesFrom(p, p.Services[i].VolumesFrom)
- if err != nil {
- return err
- }
- p.Services[i].VolumesFrom = volumesFrom
- if len(dependServices) > 0 {
- if p.Services[i].DependsOn == nil {
- p.Services[i].DependsOn = make(types.DependsOnConfig, len(dependServices))
- }
- for _, service := range p.Services {
- if utils.StringContains(dependServices, service.Name) {
- p.Services[i].DependsOn[service.Name] = types.ServiceDependency{
- Condition: types.ServiceConditionStarted,
- }
- }
- }
- }
- }
- return nil
+ return newConvergence(options.Services, observedState, networks, volumes, s).apply(ctx, project, options)
}
func prepareNetworks(project *types.Project) {
- for k, network := range project.Networks {
- network.Labels = network.Labels.Add(api.NetworkLabel, k)
- network.Labels = network.Labels.Add(api.ProjectLabel, project.Name)
- network.Labels = network.Labels.Add(api.VersionLabel, api.ComposeVersion)
- project.Networks[k] = network
+ for k, nw := range project.Networks {
+ nw.CustomLabels = nw.CustomLabels.
+ Add(api.NetworkLabel, k).
+ Add(api.ProjectLabel, project.Name).
+ Add(api.VersionLabel, api.ComposeVersion)
+ project.Networks[k] = nw
}
}
-func prepareServicesDependsOn(p *types.Project) error {
- for i, service := range p.Services {
- var dependencies []string
- networkDependency := getDependentServiceFromMode(service.NetworkMode)
- if networkDependency != "" {
- dependencies = append(dependencies, networkDependency)
- }
-
- ipcDependency := getDependentServiceFromMode(service.Ipc)
- if ipcDependency != "" {
- dependencies = append(dependencies, ipcDependency)
- }
-
- pidDependency := getDependentServiceFromMode(service.Pid)
- if pidDependency != "" {
- dependencies = append(dependencies, pidDependency)
- }
-
- for _, vol := range service.VolumesFrom {
- spec := strings.Split(vol, ":")
- if len(spec) == 0 {
- continue
- }
- if spec[0] == "container" {
- continue
- }
- dependencies = append(dependencies, spec[0])
- }
-
- if len(dependencies) == 0 {
- continue
- }
- if service.DependsOn == nil {
- service.DependsOn = make(types.DependsOnConfig)
- }
- deps, err := p.GetServices(dependencies...)
+func (s *composeService) ensureNetworks(ctx context.Context, project *types.Project) (map[string]string, error) {
+ networks := map[string]string{}
+ for name, nw := range project.Networks {
+ id, err := s.ensureNetwork(ctx, project, name, &nw)
if err != nil {
- return err
- }
- for _, d := range deps {
- if _, ok := service.DependsOn[d.Name]; !ok {
- service.DependsOn[d.Name] = types.ServiceDependency{
- Condition: types.ServiceConditionStarted,
- }
- }
- }
- p.Services[i] = service
- }
- return nil
-}
-
-func (s *composeService) ensureNetworks(ctx context.Context, networks types.Networks) error {
- for _, network := range networks {
- err := s.ensureNetwork(ctx, network)
- if err != nil {
- return err
+ return nil, err
}
+ networks[name] = id
+ project.Networks[name] = nw
}
- return nil
+ return networks, nil
}
-func (s *composeService) ensureProjectVolumes(ctx context.Context, project *types.Project) error {
+func (s *composeService) ensureProjectVolumes(ctx context.Context, project *types.Project) (map[string]string, error) {
+ ids := map[string]string{}
for k, volume := range project.Volumes {
- volume.Labels = volume.Labels.Add(api.VolumeLabel, k)
- volume.Labels = volume.Labels.Add(api.ProjectLabel, project.Name)
- volume.Labels = volume.Labels.Add(api.VersionLabel, api.ComposeVersion)
- err := s.ensureVolume(ctx, volume, project.Name)
+ volume.CustomLabels = volume.CustomLabels.Add(api.VolumeLabel, k)
+ volume.CustomLabels = volume.CustomLabels.Add(api.ProjectLabel, project.Name)
+ volume.CustomLabels = volume.CustomLabels.Add(api.VersionLabel, api.ComposeVersion)
+ id, err := s.ensureVolume(ctx, k, volume, project)
if err != nil {
- return err
+ return nil, err
}
+ ids[k] = id
}
- return nil
-}
-func getImageName(service types.ServiceConfig, projectName string) string {
- imageName := service.Image
- if imageName == "" {
- imageName = projectName + "_" + service.Name
- }
- return imageName
+ return ids, nil
}
-func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project, service types.ServiceConfig,
- number int, inherit *moby.Container, autoRemove bool, attachStdin bool) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) {
-
- labels, err := s.prepareLabels(service, number)
+//nolint:gocyclo
+func (s *composeService) getCreateConfigs(ctx context.Context,
+ p *types.Project,
+ service types.ServiceConfig,
+ number int,
+ inherit *container.Summary,
+ opts createOptions,
+) (createConfigs, error) {
+ labels, err := s.prepareLabels(opts.Labels, service, number)
if err != nil {
- return nil, nil, nil, err
+ return createConfigs{}, err
}
- var (
- runCmd strslice.StrSlice
- entrypoint strslice.StrSlice
- )
+ var runCmd, entrypoint []string
if service.Command != nil {
- runCmd = strslice.StrSlice(service.Command)
+ runCmd = service.Command
}
if service.Entrypoint != nil {
- entrypoint = strslice.StrSlice(service.Entrypoint)
+ entrypoint = service.Entrypoint
}
var (
@@ -250,14 +191,25 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
stdinOpen = service.StdinOpen
)
- volumeMounts, binds, mounts, err := s.buildContainerVolumes(ctx, *p, service, inherit)
- if err != nil {
- return nil, nil, nil, err
- }
-
proxyConfig := types.MappingWithEquals(s.configFile().ParseProxyConfig(s.apiClient().DaemonHost(), nil))
env := proxyConfig.OverrideBy(service.Environment)
+ var mainNwName string
+ var mainNw *types.ServiceNetworkConfig
+ if len(service.Networks) > 0 {
+ mainNwName = service.NetworksByPriority()[0]
+ mainNw = service.Networks[mainNwName]
+ }
+
+ macAddress, err := s.prepareContainerMACAddress(ctx, service, mainNw, mainNwName)
+ if err != nil {
+ return createConfigs{}, err
+ }
+
+ healthcheck, err := s.ToMobyHealthCheck(ctx, service.HealthCheck)
+ if err != nil {
+ return createConfigs{}, err
+ }
containerConfig := container.Config{
Hostname: service.Hostname,
Domainname: service.DomainName,
@@ -265,63 +217,22 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
ExposedPorts: buildContainerPorts(service),
Tty: tty,
OpenStdin: stdinOpen,
- StdinOnce: attachStdin && stdinOpen,
- AttachStdin: attachStdin,
+ StdinOnce: opts.AttachStdin && stdinOpen,
+ AttachStdin: opts.AttachStdin,
AttachStderr: true,
AttachStdout: true,
Cmd: runCmd,
- Image: getImageName(service, p.Name),
+ Image: api.GetImageNameOrDefault(service, p.Name),
WorkingDir: service.WorkingDir,
Entrypoint: entrypoint,
NetworkDisabled: service.NetworkMode == "disabled",
- MacAddress: service.MacAddress,
+ MacAddress: macAddress, // Field is deprecated since API v1.44, but kept for compatibility with older API versions.
Labels: labels,
StopSignal: service.StopSignal,
Env: ToMobyEnv(env),
- Healthcheck: ToMobyHealthCheck(service.HealthCheck),
- Volumes: volumeMounts,
+ Healthcheck: healthcheck,
StopTimeout: ToSeconds(service.StopGracePeriod),
- }
-
- portBindings := buildContainerPortBindingOptions(service)
-
- resources := getDeployResources(service)
-
- if service.NetworkMode == "" {
- service.NetworkMode = getDefaultNetworkMode(p, service)
- }
-
- var networkConfig *network.NetworkingConfig
-
- for _, id := range service.NetworksByPriority() {
- net := p.Networks[id]
- config := service.Networks[id]
- var ipam *network.EndpointIPAMConfig
- var (
- ipv4Address string
- ipv6Address string
- )
- if config != nil {
- ipv4Address = config.Ipv4Address
- ipv6Address = config.Ipv6Address
- ipam = &network.EndpointIPAMConfig{
- IPv4Address: ipv4Address,
- IPv6Address: ipv6Address,
- }
- }
- networkConfig = &network.NetworkingConfig{
- EndpointsConfig: map[string]*network.EndpointSettings{
- net.Name: {
- Aliases: getAliases(service, config),
- IPAddress: ipv4Address,
- IPv6Gateway: ipv6Address,
- IPAMConfig: ipam,
- },
- },
- }
- break //nolint:staticcheck
- }
-
+ }
+ // VOLUMES/MOUNTS/FILESYSTEMS
tmpfs := map[string]string{}
for _, t := range service.Tmpfs {
if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
@@ -330,7 +241,28 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
tmpfs[arr[0]] = ""
}
}
+ binds, mounts, err := s.buildContainerVolumes(ctx, *p, service, inherit)
+ if err != nil {
+ return createConfigs{}, err
+ }
+
+ // NETWORKING
+ links, err := s.getLinks(ctx, p.Name, service, number)
+ if err != nil {
+ return createConfigs{}, err
+ }
+ apiVersion, err := s.RuntimeVersion(ctx)
+ if err != nil {
+ return createConfigs{}, err
+ }
+ networkMode, networkingConfig, err := defaultNetworkSettings(p, service, number, links, opts.UseNetworkAliases, apiVersion)
+ if err != nil {
+ return createConfigs{}, err
+ }
+ portBindings := buildContainerPortBindingOptions(service)
+ // MISC
+ resources := getDeployResources(service)
var logConfig container.LogConfig
if service.Logging != nil {
logConfig = container.LogConfig{
@@ -338,28 +270,22 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
Config: service.Logging.Options,
}
}
-
- var volumesFrom []string
- for _, v := range service.VolumesFrom {
- if !strings.HasPrefix(v, "container:") {
- return nil, nil, nil, fmt.Errorf("invalid volume_from: %s", v)
- }
- volumesFrom = append(volumesFrom, v[len("container:"):])
- }
-
- securityOpts, err := parseSecurityOpts(p, service.SecurityOpt)
+ securityOpts, unconfined, err := parseSecurityOpts(p, service.SecurityOpt)
if err != nil {
- return nil, nil, nil, err
+ return createConfigs{}, err
}
+
hostConfig := container.HostConfig{
- AutoRemove: autoRemove,
+ AutoRemove: opts.AutoRemove,
+ Annotations: service.Annotations,
Binds: binds,
Mounts: mounts,
- CapAdd: strslice.StrSlice(service.CapAdd),
- CapDrop: strslice.StrSlice(service.CapDrop),
- NetworkMode: container.NetworkMode(service.NetworkMode),
+ CapAdd: service.CapAdd,
+ CapDrop: service.CapDrop,
+ NetworkMode: networkMode,
Init: service.Init,
IpcMode: container.IpcMode(service.Ipc),
+ CgroupnsMode: container.CgroupnsMode(service.Cgroup),
ReadonlyRootfs: service.ReadOnly,
RestartPolicy: getRestartPolicy(service),
ShmSize: int64(service.ShmSize),
@@ -367,13 +293,15 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
PortBindings: portBindings,
Resources: resources,
VolumeDriver: service.VolumeDriver,
- VolumesFrom: volumesFrom,
+ VolumesFrom: service.VolumesFrom,
DNS: service.DNS,
DNSSearch: service.DNSSearch,
DNSOptions: service.DNSOpts,
- ExtraHosts: service.ExtraHosts,
+ ExtraHosts: service.ExtraHosts.AsList(":"),
SecurityOpt: securityOpts,
+ StorageOpt: service.StorageOpt,
UsernsMode: container.UsernsMode(service.UserNSMode),
+ UTSMode: container.UTSMode(service.Uts),
Privileged: service.Privileged,
PidMode: container.PidMode(service.Pid),
Tmpfs: tmpfs,
@@ -381,75 +309,249 @@ func (s *composeService) getCreateOptions(ctx context.Context, p *types.Project,
Runtime: service.Runtime,
LogConfig: logConfig,
GroupAdd: service.GroupAdd,
+ Links: links,
+ OomScoreAdj: int(service.OomScoreAdj),
+ }
+
+ if unconfined {
+ hostConfig.MaskedPaths = []string{}
+ hostConfig.ReadonlyPaths = []string{}
+ }
+
+ cfgs := createConfigs{
+ Container: &containerConfig,
+ Host: &hostConfig,
+ Network: networkingConfig,
+ Links: links,
+ }
+ return cfgs, nil
+}
+
+// prepareContainerMACAddress handles the service-level mac_address field and the newer mac_address field added to service
+// network config. This newer field is only compatible with the Engine API v1.44 (and onwards), and this API version
+// also deprecates the container-wide mac_address field. Thus, this method will validate service config and mutate the
+// passed mainNw to provide backward-compatibility whenever possible.
+//
+// It returns the container-wide MAC address, but this value will be kept empty for newer API versions.
+func (s *composeService) prepareContainerMACAddress(ctx context.Context, service types.ServiceConfig, mainNw *types.ServiceNetworkConfig, nwName string) (string, error) {
+ version, err := s.RuntimeVersion(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // Engine API 1.44 added support for endpoint-specific MAC address and now returns a warning when a MAC address is
+ // set in container.Config. Thus, we have to jump through a number of hoops:
+ //
+ // 1. Top-level mac_address and main endpoint's MAC address should be the same;
+ // 2. If supported by the API, top-level mac_address should be migrated to the main endpoint and container.Config
+ // should be kept empty;
+ // 3. Otherwise, the endpoint mac_address should be set in container.Config and no other endpoint-specific
+ // mac_address can be specified. If that's the case, use top-level mac_address.
+ //
+ // After that, if an endpoint mac_address is set, it's either user-defined or migrated by the code below, so
+ // there's no need to check for API version in defaultNetworkSettings.
+ macAddress := service.MacAddress
+ if macAddress != "" && mainNw != nil && mainNw.MacAddress != "" && mainNw.MacAddress != macAddress {
+ return "", fmt.Errorf("the service-level mac_address should have the same value as network %s", nwName)
+ }
+ if versions.GreaterThanOrEqualTo(version, "1.44") {
+ if mainNw != nil && mainNw.MacAddress == "" {
+ mainNw.MacAddress = macAddress
+ }
+ macAddress = ""
+ } else if len(service.Networks) > 0 {
+ var withMacAddress []string
+ for nwName, nw := range service.Networks {
+ if nw != nil && nw.MacAddress != "" {
+ withMacAddress = append(withMacAddress, nwName)
+ }
+ }
+
+ if len(withMacAddress) > 1 {
+ return "", fmt.Errorf("a MAC address is specified for multiple networks (%s), but this feature requires Docker Engine v25 or later", strings.Join(withMacAddress, ", "))
+ }
+
+ if mainNw != nil && mainNw.MacAddress != "" {
+ macAddress = mainNw.MacAddress
+ }
+ }
+
+ return macAddress, nil
+}
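// Minimal version-gate sketch (assumed helper, not part of the change above), relying on
// the same `versions` package used by the method: on Engine API >= 1.44 the MAC address
// is carried by the primary endpoint, otherwise it stays container-wide.
func macAddressGoesOnEndpoint(apiVersion string) bool {
    return versions.GreaterThanOrEqualTo(apiVersion, "1.44")
}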
+
+func getAliases(project *types.Project, service types.ServiceConfig, serviceIndex int, cfg *types.ServiceNetworkConfig, useNetworkAliases bool) []string {
+ aliases := []string{getContainerName(project.Name, service, serviceIndex)}
+ if useNetworkAliases {
+ aliases = append(aliases, service.Name)
+ if cfg != nil {
+ aliases = append(aliases, cfg.Aliases...)
+ }
}
+ return aliases
+}
- return &containerConfig, &hostConfig, networkConfig, nil
+func createEndpointSettings(p *types.Project, service types.ServiceConfig, serviceIndex int, networkKey string, links []string, useNetworkAliases bool) *network.EndpointSettings {
+ const ifname = "com.docker.network.endpoint.ifname"
+
+ config := service.Networks[networkKey]
+ var ipam *network.EndpointIPAMConfig
+ var (
+ ipv4Address string
+ ipv6Address string
+ macAddress string
+ driverOpts types.Options
+ gwPriority int
+ )
+ if config != nil {
+ ipv4Address = config.Ipv4Address
+ ipv6Address = config.Ipv6Address
+ ipam = &network.EndpointIPAMConfig{
+ IPv4Address: ipv4Address,
+ IPv6Address: ipv6Address,
+ LinkLocalIPs: config.LinkLocalIPs,
+ }
+ macAddress = config.MacAddress
+ driverOpts = config.DriverOpts
+ if config.InterfaceName != "" {
+ if driverOpts == nil {
+ driverOpts = map[string]string{}
+ }
+ if name, ok := driverOpts[ifname]; ok && name != config.InterfaceName {
+ logrus.Warnf("ignoring services.%s.networks.%s.interface_name as %s driver_opts is already declared", service.Name, networkKey, ifname)
+ }
+ driverOpts[ifname] = config.InterfaceName
+ }
+ gwPriority = config.GatewayPriority
+ }
+ return &network.EndpointSettings{
+ Aliases: getAliases(p, service, serviceIndex, config, useNetworkAliases),
+ Links: links,
+ IPAddress: ipv4Address,
+ IPv6Gateway: ipv6Address,
+ IPAMConfig: ipam,
+ MacAddress: macAddress,
+ DriverOpts: driverOpts,
+ GwPriority: gwPriority,
+ }
}
// copy/pasted from https://github.com/docker/cli/blob/9de1b162f/cli/command/container/opts.go#L673-L697 + RelativePath
// TODO find some way to share this code with docker/cli
-func parseSecurityOpts(p *types.Project, securityOpts []string) ([]string, error) {
- for key, opt := range securityOpts {
+func parseSecurityOpts(p *types.Project, securityOpts []string) ([]string, bool, error) {
+ var (
+ unconfined bool
+ parsed []string
+ )
+ for _, opt := range securityOpts {
+ if opt == "systempaths=unconfined" {
+ unconfined = true
+ continue
+ }
con := strings.SplitN(opt, "=", 2)
if len(con) == 1 && con[0] != "no-new-privileges" {
if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
} else {
- return securityOpts, errors.Errorf("Invalid security-opt: %q", opt)
+ return securityOpts, false, fmt.Errorf("invalid security-opt: %q", opt)
}
}
- if con[0] == "seccomp" && con[1] != "unconfined" {
- f, err := ioutil.ReadFile(p.RelativePath(con[1]))
+ if con[0] == "seccomp" && con[1] != "unconfined" && con[1] != "builtin" {
+ f, err := os.ReadFile(p.RelativePath(con[1]))
if err != nil {
- return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", con[1], err)
+ return securityOpts, false, fmt.Errorf("opening seccomp profile (%s) failed: %w", con[1], err)
}
b := bytes.NewBuffer(nil)
if err := json.Compact(b, f); err != nil {
- return securityOpts, errors.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err)
+ return securityOpts, false, fmt.Errorf("compacting json for seccomp profile (%s) failed: %w", con[1], err)
}
- securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes())
+ parsed = append(parsed, fmt.Sprintf("seccomp=%s", b.Bytes()))
+ } else {
+ parsed = append(parsed, opt)
}
}
- return securityOpts, nil
+ return parsed, unconfined, nil
}
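// Illustrative sketch (not part of the change above): the seccomp branch isolated as a
// standalone helper, using the same stdlib calls (os.ReadFile, json.Compact) as the code
// above. The helper name is an assumption for the example.
func inlineSeccompProfile(path string) (string, error) {
    raw, err := os.ReadFile(path)
    if err != nil {
        return "", fmt.Errorf("opening seccomp profile (%s) failed: %w", path, err)
    }
    compact := bytes.NewBuffer(nil)
    if err := json.Compact(compact, raw); err != nil {
        return "", fmt.Errorf("compacting json for seccomp profile (%s) failed: %w", path, err)
    }
    // the engine expects the whole profile inline, e.g. "seccomp={...}"
    return fmt.Sprintf("seccomp=%s", compact.Bytes()), nil
}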
-func (s *composeService) prepareLabels(service types.ServiceConfig, number int) (map[string]string, error) {
- labels := map[string]string{}
- for k, v := range service.Labels {
- labels[k] = v
- }
- for k, v := range service.CustomLabels {
- labels[k] = v
- }
-
+func (s *composeService) prepareLabels(labels types.Labels, service types.ServiceConfig, number int) (map[string]string, error) {
hash, err := ServiceHash(service)
if err != nil {
return nil, err
}
labels[api.ConfigHashLabel] = hash
- labels[api.ContainerNumberLabel] = strconv.Itoa(number)
+ if number > 0 {
+ // One-off containers are not indexed
+ labels[api.ContainerNumberLabel] = strconv.Itoa(number)
+ }
var dependencies []string
for s, d := range service.DependsOn {
- dependencies = append(dependencies, s+":"+d.Condition)
+ dependencies = append(dependencies, fmt.Sprintf("%s:%s:%t", s, d.Condition, d.Restart))
}
labels[api.DependenciesLabel] = strings.Join(dependencies, ",")
return labels, nil
}
-func getDefaultNetworkMode(project *types.Project, service types.ServiceConfig) string {
+// defaultNetworkSettings determines the container.NetworkMode and corresponding network.NetworkingConfig (nil if not applicable).
+func defaultNetworkSettings(project *types.Project,
+ service types.ServiceConfig, serviceIndex int,
+ links []string, useNetworkAliases bool,
+ version string,
+) (container.NetworkMode, *network.NetworkingConfig, error) {
+ if service.NetworkMode != "" {
+ return container.NetworkMode(service.NetworkMode), nil, nil
+ }
+
if len(project.Networks) == 0 {
- return "none"
+ return "none", nil, nil
}
+ var primaryNetworkKey string
if len(service.Networks) > 0 {
- name := service.NetworksByPriority()[0]
- return project.Networks[name].Name
+ primaryNetworkKey = service.NetworksByPriority()[0]
+ } else {
+ primaryNetworkKey = "default"
+ }
+ primaryNetworkMobyNetworkName := project.Networks[primaryNetworkKey].Name
+ primaryNetworkEndpoint := createEndpointSettings(project, service, serviceIndex, primaryNetworkKey, links, useNetworkAliases)
+ endpointsConfig := map[string]*network.EndpointSettings{}
+
+ // Starting from API version 1.44, the Engine will take several EndpointsConfigs
+ // so we can pass all the extra networks we want the container to be connected to
+ // in the network configuration instead of connecting the container to each extra
+ // network individually after creation.
+ if versions.GreaterThanOrEqualTo(version, "1.44") {
+ if len(service.Networks) > 1 {
+ serviceNetworks := service.NetworksByPriority()
+ for _, networkKey := range serviceNetworks[1:] {
+ mobyNetworkName := project.Networks[networkKey].Name
+ epSettings := createEndpointSettings(project, service, serviceIndex, networkKey, links, useNetworkAliases)
+ endpointsConfig[mobyNetworkName] = epSettings
+ }
+ }
+ if primaryNetworkEndpoint.MacAddress == "" {
+ primaryNetworkEndpoint.MacAddress = service.MacAddress
+ }
+ }
+
+ if versions.LessThan(version, "1.49") {
+ for _, config := range service.Networks {
+ if config != nil && config.InterfaceName != "" {
+ return "", nil, fmt.Errorf("interface_name requires Docker Engine v28.1 or later")
+ }
+ }
+ }
+
+ endpointsConfig[primaryNetworkMobyNetworkName] = primaryNetworkEndpoint
+ networkConfig := &network.NetworkingConfig{
+ EndpointsConfig: endpointsConfig,
}
- return project.Networks["default"].Name
+ // From the Engine API docs:
+ // > Supported standard values are: bridge, host, none, and container:.
+ // > Any other value is taken as a custom network's name to which this container should connect to.
+ return container.NetworkMode(primaryNetworkMobyNetworkName), networkConfig, nil
}
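// Example outcome (illustrative, names assumed): a service attached to networks `front`
// (priority: 100) and `back` (priority: 0) in project `myproj` gets NetworkMode
// "myproj_front". On Engine API >= 1.44 the returned NetworkingConfig carries
// EndpointsConfig entries for both "myproj_front" and "myproj_back"; on older APIs only
// the primary endpoint is passed at create time and the extra networks are connected
// individually after creation.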
func getRestartPolicy(service types.ServiceConfig) container.RestartPolicy {
@@ -461,7 +563,7 @@ func getRestartPolicy(service types.ServiceConfig) container.RestartPolicy {
attempts, _ = strconv.Atoi(split[1])
}
restart = container.RestartPolicy{
- Name: split[0],
+ Name: mapRestartPolicyCondition(split[0]),
MaximumRetryCount: attempts,
}
}
@@ -472,13 +574,29 @@ func getRestartPolicy(service types.ServiceConfig) container.RestartPolicy {
attempts = int(*policy.MaxAttempts)
}
restart = container.RestartPolicy{
- Name: policy.Condition,
+ Name: mapRestartPolicyCondition(policy.Condition),
MaximumRetryCount: attempts,
}
}
return restart
}
+func mapRestartPolicyCondition(condition string) container.RestartPolicyMode {
+ // map definitions of deploy.restart_policy to engine definitions
+ switch condition {
+ case "none", "no":
+ return container.RestartPolicyDisabled
+ case "on-failure":
+ return container.RestartPolicyOnFailure
+ case "unless-stopped":
+ return container.RestartPolicyUnlessStopped
+ case "any", "always":
+ return container.RestartPolicyAlways
+ default:
+ return container.RestartPolicyMode(condition)
+ }
+}
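// Illustrative mapping table (not part of the change above): both service-level `restart`
// values and swarm-style `deploy.restart_policy.condition` values collapse onto the
// engine's restart-policy modes. The helper name is an assumption for the example.
func exampleRestartPolicyMapping() map[string]container.RestartPolicyMode {
    return map[string]container.RestartPolicyMode{
        "no":             container.RestartPolicyDisabled,      // restart: "no"
        "none":           container.RestartPolicyDisabled,      // deploy.restart_policy.condition: none
        "on-failure":     container.RestartPolicyOnFailure,     // either form
        "unless-stopped": container.RestartPolicyUnlessStopped, // restart: unless-stopped
        "any":            container.RestartPolicyAlways,        // deploy.restart_policy.condition: any
        "always":         container.RestartPolicyAlways,        // restart: always
    }
}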
+
func getDeployResources(s types.ServiceConfig) container.Resources {
var swappiness *int64
if s.MemSwappiness != 0 {
@@ -498,7 +616,8 @@ func getDeployResources(s types.ServiceConfig) container.Resources {
CPURealtimePeriod: s.CPURTPeriod,
CPURealtimeRuntime: s.CPURTRuntime,
CPUShares: s.CPUShares,
- CPUPercent: int64(s.CPUS * 100),
+ NanoCPUs: int64(s.CPUS * 1e9),
+ CPUPercent: int64(s.CPUPercent * 100),
CpusetCpus: s.CPUSet,
DeviceCgroupRules: s.DeviceCgroupRules,
}
@@ -514,33 +633,46 @@ func getDeployResources(s types.ServiceConfig) container.Resources {
setReservations(s.Deploy.Resources.Reservations, &resources)
}
+ var cdiDeviceNames []string
for _, device := range s.Devices {
- // FIXME should use docker/cli parseDevice, unfortunately private
- src := ""
- dst := ""
- permissions := "rwm"
- arr := strings.Split(device, ":")
- switch len(arr) {
- case 3:
- permissions = arr[2]
- fallthrough
- case 2:
- dst = arr[1]
- fallthrough
- case 1:
- src = arr[0]
- }
- if dst == "" {
- dst = src
+
+ if device.Source == device.Target && cdi.IsQualifiedName(device.Source) {
+ cdiDeviceNames = append(cdiDeviceNames, device.Source)
+ continue
}
+
resources.Devices = append(resources.Devices, container.DeviceMapping{
- PathOnHost: src,
- PathInContainer: dst,
- CgroupPermissions: permissions,
+ PathOnHost: device.Source,
+ PathInContainer: device.Target,
+ CgroupPermissions: device.Permissions,
+ })
+ }
+
+ if len(cdiDeviceNames) > 0 {
+ resources.DeviceRequests = append(resources.DeviceRequests, container.DeviceRequest{
+ Driver: "cdi",
+ DeviceIDs: cdiDeviceNames,
+ })
+ }
+
+ for _, gpus := range s.Gpus {
+ resources.DeviceRequests = append(resources.DeviceRequests, container.DeviceRequest{
+ Driver: gpus.Driver,
+ Count: int(gpus.Count),
+ DeviceIDs: gpus.IDs,
+ Capabilities: [][]string{append(gpus.Capabilities, "gpu")},
+ Options: gpus.Options,
})
}
- for name, u := range s.Ulimits {
+ ulimits := toUlimits(s.Ulimits)
+ resources.Ulimits = ulimits
+ return resources
+}
+
+func toUlimits(m map[string]*types.UlimitsConfig) []*container.Ulimit {
+ var ulimits []*container.Ulimit
+ for name, u := range m {
soft := u.Single
if u.Soft != 0 {
soft = u.Soft
@@ -549,25 +681,32 @@ func getDeployResources(s types.ServiceConfig) container.Resources {
if u.Hard != 0 {
hard = u.Hard
}
- resources.Ulimits = append(resources.Ulimits, &units.Ulimit{
+ ulimits = append(ulimits, &container.Ulimit{
Name: name,
Hard: int64(hard),
Soft: int64(soft),
})
}
- return resources
+ return ulimits
}
func setReservations(reservations *types.Resource, resources *container.Resources) {
if reservations == nil {
return
}
+ // CPU reservation is a Swarm-only option and PIDs can only be set as a limit,
+ // so we only need to map memory reservation and devices
+ if reservations.MemoryBytes != 0 {
+ resources.MemoryReservation = int64(reservations.MemoryBytes)
+ }
+
for _, device := range reservations.Devices {
resources.DeviceRequests = append(resources.DeviceRequests, container.DeviceRequest{
Capabilities: [][]string{device.Capabilities},
Count: int(device.Count),
DeviceIDs: device.IDs,
Driver: device.Driver,
+ Options: device.Options,
})
}
}
@@ -579,9 +718,11 @@ func setLimits(limits *types.Resource, resources *container.Resources) {
if limits.MemoryBytes != 0 {
resources.Memory = int64(limits.MemoryBytes)
}
- if limits.NanoCPUs != "" {
- i, _ := strconv.ParseInt(limits.NanoCPUs, 10, 64)
- resources.NanoCPUs = i
+ if limits.NanoCPUs != 0 {
+ resources.NanoCPUs = int64(limits.NanoCPUs * 1e9)
+ }
+ if limits.Pids > 0 {
+ resources.PidsLimit = &limits.Pids
}
}
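// Illustrative sketch (not part of the change above): how fractional CPU and pid limits
// from the compose model land on the engine's Resources struct. Variable names are
// assumptions for the example.
func exampleCPUAndPidLimits() container.Resources {
    var r container.Resources
    cpus := 1.5        // deploy.resources.limits.cpus: "1.5"
    pids := int64(100) // deploy.resources.limits.pids: 100
    r.NanoCPUs = int64(cpus * 1e9) // 1_500_000_000 nano-CPUs, i.e. one and a half CPUs
    r.PidsLimit = &pids            // pointer, so zero means "not set"
    return r
}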
@@ -599,25 +740,25 @@ func setBlkio(blkio *types.BlkioConfig, resources *container.Resources) {
for _, b := range blkio.DeviceReadBps {
resources.BlkioDeviceReadBps = append(resources.BlkioDeviceReadBps, &blkiodev.ThrottleDevice{
Path: b.Path,
- Rate: b.Rate,
+ Rate: uint64(b.Rate),
})
}
for _, b := range blkio.DeviceReadIOps {
resources.BlkioDeviceReadIOps = append(resources.BlkioDeviceReadIOps, &blkiodev.ThrottleDevice{
Path: b.Path,
- Rate: b.Rate,
+ Rate: uint64(b.Rate),
})
}
for _, b := range blkio.DeviceWriteBps {
resources.BlkioDeviceWriteBps = append(resources.BlkioDeviceWriteBps, &blkiodev.ThrottleDevice{
Path: b.Path,
- Rate: b.Rate,
+ Rate: uint64(b.Rate),
})
}
for _, b := range blkio.DeviceWriteIOps {
resources.BlkioDeviceWriteIOps = append(resources.BlkioDeviceWriteIOps, &blkiodev.ThrottleDevice{
Path: b.Path,
- Rate: b.Rate,
+ Rate: uint64(b.Rate),
})
}
}
@@ -648,100 +789,146 @@ func buildContainerPortBindingOptions(s types.ServiceConfig) nat.PortMap {
return bindings
}
-func getVolumesFrom(project *types.Project, volumesFrom []string) ([]string, []string, error) {
- var volumes = []string{}
- var services = []string{}
- // parse volumes_from
- if len(volumesFrom) == 0 {
- return volumes, services, nil
- }
- for _, vol := range volumesFrom {
- spec := strings.Split(vol, ":")
- if len(spec) == 0 {
- continue
- }
- if spec[0] == "container" {
- volumes = append(volumes, strings.Join(spec[1:], ":"))
- continue
- }
- serviceName := spec[0]
- services = append(services, serviceName)
- service, err := project.GetService(serviceName)
- if err != nil {
- return nil, nil, err
- }
-
- firstContainer := getContainerName(project.Name, service, 1)
- v := fmt.Sprintf("container:%s", firstContainer)
- if len(spec) > 2 {
- v = fmt.Sprintf("container:%s:%s", firstContainer, strings.Join(spec[1:], ":"))
- }
- volumes = append(volumes, v)
- }
- return volumes, services, nil
-
-}
-
func getDependentServiceFromMode(mode string) string {
- if strings.HasPrefix(mode, types.NetworkModeServicePrefix) {
+ if strings.HasPrefix(
+ mode,
+ types.NetworkModeServicePrefix,
+ ) {
return mode[len(types.NetworkModeServicePrefix):]
}
return ""
}
-func (s *composeService) buildContainerVolumes(ctx context.Context, p types.Project, service types.ServiceConfig,
- inherit *moby.Container) (map[string]struct{}, []string, []mount.Mount, error) {
- var mounts = []mount.Mount{}
-
- image := getImageName(service, p.Name)
- imgInspect, _, err := s.apiClient().ImageInspectWithRaw(ctx, image)
- if err != nil {
- return nil, nil, nil, err
- }
+func (s *composeService) buildContainerVolumes(
+ ctx context.Context,
+ p types.Project,
+ service types.ServiceConfig,
+ inherit *container.Summary,
+) ([]string, []mount.Mount, error) {
+ var mounts []mount.Mount
+ var binds []string
- mountOptions, err := buildContainerMountOptions(p, service, imgInspect, inherit)
+ mountOptions, err := s.buildContainerMountOptions(ctx, p, service, inherit)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
- volumeMounts := map[string]struct{}{}
- binds := []string{}
-MOUNTS:
for _, m := range mountOptions {
- volumeMounts[m.Target] = struct{}{}
- // `Bind` API is used when host path need to be created if missing, `Mount` is preferred otherwise
- if m.Type == mount.TypeBind || m.Type == mount.TypeNamedPipe {
- for _, v := range service.Volumes {
- if v.Target == m.Target && v.Bind != nil && v.Bind.CreateHostPath {
- binds = append(binds, fmt.Sprintf("%s:%s:%s", m.Source, m.Target, getBindMode(v.Bind, m.ReadOnly)))
- continue MOUNTS
+ switch m.Type {
+ case mount.TypeBind:
+ // `Mount` is preferred but does not offer an option to create the host path if missing,
+ // so the `Bind` API is used here with the raw volume string
+ // see https://github.com/moby/moby/issues/43483
+ v := findVolumeByTarget(service.Volumes, m.Target)
+ if v != nil {
+ if v.Type != types.VolumeTypeBind {
+ v.Source = m.Source
+ }
+ if !bindRequiresMountAPI(v.Bind) {
+ source := m.Source
+ if vol := findVolumeByName(p.Volumes, m.Source); vol != nil {
+ source = vol.Name
+ }
+ binds = append(binds, toBindString(source, v))
+ continue
}
}
+ case mount.TypeVolume:
+ v := findVolumeByTarget(service.Volumes, m.Target)
+ vol := findVolumeByName(p.Volumes, m.Source)
+ if v != nil && vol != nil {
+ // Prefer the bind API if no advanced option is used, to preserve backward compatibility
+ if !volumeRequiresMountAPI(v.Volume) {
+ binds = append(binds, toBindString(vol.Name, v))
+ continue
+ }
+ }
+ case mount.TypeImage:
+ version, err := s.RuntimeVersion(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ if versions.LessThan(version, "1.48") {
+ return nil, nil, fmt.Errorf("volume with type=image require Docker Engine v28 or later")
+ }
}
mounts = append(mounts, m)
}
- return volumeMounts, binds, mounts, nil
+ return binds, mounts, nil
+}
+
+func toBindString(name string, v *types.ServiceVolumeConfig) string {
+ access := "rw"
+ if v.ReadOnly {
+ access = "ro"
+ }
+ options := []string{access}
+ if v.Bind != nil && v.Bind.SELinux != "" {
+ options = append(options, v.Bind.SELinux)
+ }
+ if v.Bind != nil && v.Bind.Propagation != "" {
+ options = append(options, v.Bind.Propagation)
+ }
+ if v.Volume != nil && v.Volume.NoCopy {
+ options = append(options, "nocopy")
+ }
+ return fmt.Sprintf("%s:%s:%s", name, v.Target, strings.Join(options, ","))
}
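// Example outputs (illustrative): the raw bind strings keep the access mode first, then
// any SELinux and propagation flags, then nocopy:
//
//	toBindString("my_volume", v)  // "my_volume:/data:ro,nocopy"      (read-only volume with nocopy)
//	toBindString("/host/path", v) // "/host/path:/data:rw,z,rprivate" (bind with selinux + propagation)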
-func getBindMode(bind *types.ServiceVolumeBind, readOnly bool) string {
- mode := "rw"
+func findVolumeByName(volumes types.Volumes, name string) *types.VolumeConfig {
+ for _, vol := range volumes {
+ if vol.Name == name {
+ return &vol
+ }
+ }
+ return nil
+}
- if readOnly {
- mode = "ro"
+func findVolumeByTarget(volumes []types.ServiceVolumeConfig, target string) *types.ServiceVolumeConfig {
+ for _, v := range volumes {
+ if v.Target == target {
+ return &v
+ }
}
+ return nil
+}
- switch bind.SELinux {
- case types.SELinuxShared:
- mode += ",z"
- case types.SELinuxPrivate:
- mode += ",Z"
+// bindRequiresMountAPI checks whether the Bind declaration can be implemented by the plain old Bind API, or uses any of
+// the advanced options which require use of the Mount API
+func bindRequiresMountAPI(bind *types.ServiceVolumeBind) bool {
+ switch {
+ case bind == nil:
+ return false
+ case !bind.CreateHostPath:
+ return true
+ case bind.Propagation != "":
+ return true
+ case bind.Recursive != "":
+ return true
+ default:
+ return false
}
+}
- return mode
+// volumeRequiresMountAPI checks whether the Volume declaration can be implemented by the plain old Bind API, or uses any of
+// the advanced options which require use of the Mount API
+func volumeRequiresMountAPI(vol *types.ServiceVolumeVolume) bool {
+ switch {
+ case vol == nil:
+ return false
+ case len(vol.Labels) > 0:
+ return true
+ case vol.Subpath != "":
+ return true
+ case vol.NoCopy:
+ return true
+ default:
+ return false
+ }
}
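// Decision sketch (illustrative): a plain `data:/data` entry with no advanced options
// stays on the legacy Binds API, while labels, a subpath or nocopy push it to the Mount API:
//
//	volumeRequiresMountAPI(nil)                                        // false -> Binds
//	volumeRequiresMountAPI(&types.ServiceVolumeVolume{})               // false -> Binds
//	volumeRequiresMountAPI(&types.ServiceVolumeVolume{Subpath: "etc"}) // true  -> Mount
//	volumeRequiresMountAPI(&types.ServiceVolumeVolume{NoCopy: true})   // true  -> Mount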
-func buildContainerMountOptions(p types.Project, s types.ServiceConfig, img moby.ImageInspect, inherit *moby.Container) ([]mount.Mount, error) {
- var mounts = map[string]mount.Mount{}
+func (s *composeService) buildContainerMountOptions(ctx context.Context, p types.Project, service types.ServiceConfig, inherit *container.Summary) ([]mount.Mount, error) {
+ mounts := map[string]mount.Mount{}
if inherit != nil {
for _, m := range inherit.Mounts {
if m.Type == "tmpfs" {
@@ -751,7 +938,11 @@ func buildContainerMountOptions(p types.Project, s types.ServiceConfig, img moby
if m.Type == "volume" {
src = m.Name
}
- m.Destination = path.Clean(m.Destination)
+
+ img, err := s.apiClient().ImageInspect(ctx, api.GetImageNameOrDefault(service, p.Name))
+ if err != nil {
+ return nil, err
+ }
if img.Config != nil {
if _, ok := img.Config.Volumes[m.Destination]; ok {
@@ -765,7 +956,7 @@ func buildContainerMountOptions(p types.Project, s types.ServiceConfig, img moby
}
}
volumes := []types.ServiceVolumeConfig{}
- for _, v := range s.Volumes {
+ for _, v := range service.Volumes {
if v.Target != m.Destination || v.Source != "" {
volumes = append(volumes, v)
continue
@@ -778,11 +969,11 @@ func buildContainerMountOptions(p types.Project, s types.ServiceConfig, img moby
ReadOnly: !m.RW,
}
}
- s.Volumes = volumes
+ service.Volumes = volumes
}
}
- mounts, err := fillBindMounts(p, s, mounts)
+ mounts, err := fillBindMounts(p, service, mounts)
if err != nil {
return nil, err
}
@@ -828,22 +1019,37 @@ func fillBindMounts(p types.Project, s types.ServiceConfig, m map[string]mount.M
}
func buildContainerConfigMounts(p types.Project, s types.ServiceConfig) ([]mount.Mount, error) {
- var mounts = map[string]mount.Mount{}
+ mounts := map[string]mount.Mount{}
configsBaseDir := "/"
for _, config := range s.Configs {
target := config.Target
if config.Target == "" {
target = configsBaseDir + config.Source
- } else if !isUnixAbs(config.Target) {
+ } else if !isAbsTarget(config.Target) {
target = configsBaseDir + config.Target
}
definedConfig := p.Configs[config.Source]
- if definedConfig.External.External {
+ if definedConfig.External {
return nil, fmt.Errorf("unsupported external config %s", definedConfig.Name)
}
+ if definedConfig.Driver != "" {
+ return nil, errors.New("Docker Compose does not support configs.*.driver") //nolint:staticcheck
+ }
+ if definedConfig.TemplateDriver != "" {
+ return nil, errors.New("Docker Compose does not support configs.*.template_driver") //nolint:staticcheck
+ }
+
+ if definedConfig.Environment != "" || definedConfig.Content != "" {
+ continue
+ }
+
+ if config.UID != "" || config.GID != "" || config.Mode != nil {
+ logrus.Warn("config `uid`, `gid` and `mode` are not supported, they will be ignored")
+ }
+
bindMount, err := buildMount(p, types.ServiceVolumeConfig{
Type: types.VolumeTypeBind,
Source: definedConfig.File,
@@ -863,32 +1069,54 @@ func buildContainerConfigMounts(p types.Project, s types.ServiceConfig) ([]mount
}
func buildContainerSecretMounts(p types.Project, s types.ServiceConfig) ([]mount.Mount, error) {
- var mounts = map[string]mount.Mount{}
+ mounts := map[string]mount.Mount{}
secretsDir := "/run/secrets/"
for _, secret := range s.Secrets {
target := secret.Target
if secret.Target == "" {
target = secretsDir + secret.Source
- } else if !isUnixAbs(secret.Target) {
+ } else if !isAbsTarget(secret.Target) {
target = secretsDir + secret.Target
}
definedSecret := p.Secrets[secret.Source]
- if definedSecret.External.External {
+ if definedSecret.External {
return nil, fmt.Errorf("unsupported external secret %s", definedSecret.Name)
}
- mount, err := buildMount(p, types.ServiceVolumeConfig{
+ if definedSecret.Driver != "" {
+ return nil, errors.New("Docker Compose does not support secrets.*.driver") //nolint:staticcheck
+ }
+ if definedSecret.TemplateDriver != "" {
+ return nil, errors.New("Docker Compose does not support secrets.*.template_driver") //nolint:staticcheck
+ }
+
+ if definedSecret.Environment != "" {
+ continue
+ }
+
+ if secret.UID != "" || secret.GID != "" || secret.Mode != nil {
+ logrus.Warn("secrets `uid`, `gid` and `mode` are not supported, they will be ignored")
+ }
+
+ if _, err := os.Stat(definedSecret.File); os.IsNotExist(err) {
+ logrus.Warnf("secret file %s does not exist", definedSecret.Name)
+ }
+
+ mnt, err := buildMount(p, types.ServiceVolumeConfig{
Type: types.VolumeTypeBind,
Source: definedSecret.File,
Target: target,
ReadOnly: true,
+ Bind: &types.ServiceVolumeBind{
+ CreateHostPath: false,
+ },
})
if err != nil {
return nil, err
}
- mounts[target] = mount
+ mounts[target] = mnt
}
values := make([]mount.Mount, 0, len(mounts))
for _, v := range mounts {
@@ -897,23 +1125,31 @@ func buildContainerSecretMounts(p types.Project, s types.ServiceConfig) ([]mount
return values, nil
}
-func isUnixAbs(path string) bool {
- return strings.HasPrefix(path, "/")
+func isAbsTarget(p string) bool {
+ return isUnixAbs(p) || isWindowsAbs(p)
+}
+
+func isUnixAbs(p string) bool {
+ return strings.HasPrefix(p, "/")
+}
+
+func isWindowsAbs(p string) bool {
+ return paths.IsWindowsAbs(p)
}
func buildMount(project types.Project, volume types.ServiceVolumeConfig) (mount.Mount, error) {
source := volume.Source
- // on windows, filepath.IsAbs(source) is false for unix style abs path like /var/run/docker.sock.
- // do not replace these with filepath.Abs(source) that will include a default drive.
- if volume.Type == types.VolumeTypeBind && !filepath.IsAbs(source) && !strings.HasPrefix(source, "/") {
- // volume source has already been prefixed with workdir if required, by compose-go project loader
- var err error
- source, err = filepath.Abs(source)
- if err != nil {
- return mount.Mount{}, err
+ switch volume.Type {
+ case types.VolumeTypeBind:
+ if !filepath.IsAbs(source) && !isUnixAbs(source) && !isWindowsAbs(source) {
+ // volume source has already been prefixed with workdir if required, by compose-go project loader
+ var err error
+ source, err = filepath.Abs(source)
+ if err != nil {
+ return mount.Mount{}, err
+ }
}
- }
- if volume.Type == types.VolumeTypeVolume {
+ case types.VolumeTypeVolume:
if volume.Source != "" {
pVolume, ok := project.Volumes[volume.Source]
if ok {
@@ -922,9 +1158,11 @@ func buildMount(project types.Project, volume types.ServiceVolumeConfig) (mount.
}
}
- bind, vol, tmpfs := buildMountOptions(volume)
+ bind, vol, tmpfs, img := buildMountOptions(volume)
- volume.Target = path.Clean(volume.Target)
+ if bind != nil {
+ volume.Type = types.VolumeTypeBind
+ }
return mount.Mount{
Type: mount.Type(volume.Type),
@@ -935,47 +1173,54 @@ func buildMount(project types.Project, volume types.ServiceVolumeConfig) (mount.
BindOptions: bind,
VolumeOptions: vol,
TmpfsOptions: tmpfs,
+ ImageOptions: img,
}, nil
}
-func buildMountOptions(volume types.ServiceVolumeConfig) (*mount.BindOptions, *mount.VolumeOptions, *mount.TmpfsOptions) {
+func buildMountOptions(volume types.ServiceVolumeConfig) (*mount.BindOptions, *mount.VolumeOptions, *mount.TmpfsOptions, *mount.ImageOptions) {
+ if volume.Type != types.VolumeTypeBind && volume.Bind != nil {
+ logrus.Warnf("mount of type `%s` should not define `bind` option", volume.Type)
+ }
+ if volume.Type != types.VolumeTypeVolume && volume.Volume != nil {
+ logrus.Warnf("mount of type `%s` should not define `volume` option", volume.Type)
+ }
+ if volume.Type != types.VolumeTypeTmpfs && volume.Tmpfs != nil {
+ logrus.Warnf("mount of type `%s` should not define `tmpfs` option", volume.Type)
+ }
+ if volume.Type != types.VolumeTypeImage && volume.Image != nil {
+ logrus.Warnf("mount of type `%s` should not define `image` option", volume.Type)
+ }
+
switch volume.Type {
case "bind":
- if volume.Volume != nil {
- logrus.Warnf("mount of type `bind` should not define `volume` option")
- }
- if volume.Tmpfs != nil {
- logrus.Warnf("mount of type `tmpfs` should not define `tmpfs` option")
- }
- return buildBindOption(volume.Bind), nil, nil
+ return buildBindOption(volume.Bind), nil, nil, nil
case "volume":
- if volume.Bind != nil {
- logrus.Warnf("mount of type `volume` should not define `bind` option")
- }
- if volume.Tmpfs != nil {
- logrus.Warnf("mount of type `volume` should not define `tmpfs` option")
- }
- return nil, buildVolumeOptions(volume.Volume), nil
+ return nil, buildVolumeOptions(volume.Volume), nil, nil
case "tmpfs":
- if volume.Bind != nil {
- logrus.Warnf("mount of type `tmpfs` should not define `bind` option")
- }
- if volume.Volume != nil {
- logrus.Warnf("mount of type `tmpfs` should not define `volume` option")
- }
- return nil, nil, buildTmpfsOptions(volume.Tmpfs)
+ return nil, nil, buildTmpfsOptions(volume.Tmpfs), nil
+ case "image":
+ return nil, nil, nil, buildImageOptions(volume.Image)
}
- return nil, nil, nil
+ return nil, nil, nil, nil
}
func buildBindOption(bind *types.ServiceVolumeBind) *mount.BindOptions {
if bind == nil {
return nil
}
- return &mount.BindOptions{
- Propagation: mount.Propagation(bind.Propagation),
- // NonRecursive: false, FIXME missing from model ?
+ opts := &mount.BindOptions{
+ Propagation: mount.Propagation(bind.Propagation),
+ CreateMountpoint: bind.CreateHostPath,
+ }
+ switch bind.Recursive {
+ case "disabled":
+ opts.NonRecursive = true
+ case "writable":
+ opts.ReadOnlyNonRecursive = true
+ case "readonly":
+ opts.ReadOnlyForceRecursive = true
}
+ return opts
}
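// Mapping sketch (illustrative): the compose-level `bind.recursive` values translate to the
// engine's recursive-bind flags as follows (leaving it unset keeps the default, fully
// recursive bind):
//
//	recursive: disabled -> BindOptions.NonRecursive = true
//	recursive: writable -> BindOptions.ReadOnlyNonRecursive = true
//	recursive: readonly -> BindOptions.ReadOnlyForceRecursive = true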
func buildVolumeOptions(vol *types.ServiceVolumeVolume) *mount.VolumeOptions {
@@ -983,8 +1228,9 @@ func buildVolumeOptions(vol *types.ServiceVolumeVolume) *mount.VolumeOptions {
return nil
}
return &mount.VolumeOptions{
- NoCopy: vol.NoCopy,
- // Labels: , // FIXME missing from model ?
+ NoCopy: vol.NoCopy,
+ Subpath: vol.Subpath,
+ Labels: vol.Labels,
// DriverConfig: , // FIXME missing from model ?
}
}
@@ -995,117 +1241,308 @@ func buildTmpfsOptions(tmpfs *types.ServiceVolumeTmpfs) *mount.TmpfsOptions {
}
return &mount.TmpfsOptions{
SizeBytes: int64(tmpfs.Size),
- // Mode: , // FIXME missing from model ?
+ Mode: os.FileMode(tmpfs.Mode),
}
}
-func getAliases(s types.ServiceConfig, c *types.ServiceNetworkConfig) []string {
- aliases := []string{s.Name}
- if c != nil {
- aliases = append(aliases, c.Aliases...)
+func buildImageOptions(image *types.ServiceVolumeImage) *mount.ImageOptions {
+ if image == nil {
+ return nil
+ }
+ return &mount.ImageOptions{
+ Subpath: image.SubPath,
}
- return aliases
}
-func (s *composeService) ensureNetwork(ctx context.Context, n types.NetworkConfig) error {
- _, err := s.apiClient().NetworkInspect(ctx, n.Name, moby.NetworkInspectOptions{})
- if err != nil {
- if errdefs.IsNotFound(err) {
- if n.External.External {
- if n.Driver == "overlay" {
- // Swarm nodes do not register overlay networks that were
- // created on a different node unless they're in use.
- // Here we assume `driver` is relevant for a network we don't manage
- // which is a non-sense, but this is our legacy ¯\(ツ)/¯
- // networkAttach will later fail anyway if network actually doesn't exists
- return nil
- }
- return fmt.Errorf("network %s declared as external, but could not be found", n.Name)
- }
- var ipam *network.IPAM
- if n.Ipam.Config != nil {
- var config []network.IPAMConfig
- for _, pool := range n.Ipam.Config {
- config = append(config, network.IPAMConfig{
- Subnet: pool.Subnet,
- IPRange: pool.IPRange,
- Gateway: pool.Gateway,
- AuxAddress: pool.AuxiliaryAddresses,
- })
- }
- ipam = &network.IPAM{
- Driver: n.Ipam.Driver,
- Config: config,
- }
+func (s *composeService) ensureNetwork(ctx context.Context, project *types.Project, name string, n *types.NetworkConfig) (string, error) {
+ if n.External {
+ return s.resolveExternalNetwork(ctx, n)
+ }
+
+ id, err := s.resolveOrCreateNetwork(ctx, project, name, n)
+ if errdefs.IsConflict(err) {
+ // Maybe another execution of `docker compose up|run` created same network
+ // let's retry once
+ return s.resolveOrCreateNetwork(ctx, project, name, n)
+ }
+ return id, err
+}
+
+func (s *composeService) resolveOrCreateNetwork(ctx context.Context, project *types.Project, name string, n *types.NetworkConfig) (string, error) { //nolint:gocyclo
+ // These are containers that could be left behind after a diverged network was removed
+ var dangledContainers Containers
+
+ // First, try to find a unique network matching by name or ID
+ inspect, err := s.apiClient().NetworkInspect(ctx, n.Name, network.InspectOptions{})
+ if err == nil {
+ // NetworkInspect will match on ID prefix, so double-check we got the expected one:
+ // looking for a network named `db`, we could erroneously match network ID `db9086999caf`
+ if inspect.Name == n.Name || inspect.ID == n.Name {
+ p, ok := inspect.Labels[api.ProjectLabel]
+ if !ok {
+ logrus.Warnf("a network with name %s exists but was not created by compose.\n"+
+ "Set `external: true` to use an existing network", n.Name)
+ } else if p != project.Name {
+ logrus.Warnf("a network with name %s exists but was not created for project %q.\n"+
+ "Set `external: true` to use an existing network", n.Name, project.Name)
}
- createOpts := moby.NetworkCreate{
- // TODO NameSpace Labels
- Labels: n.Labels,
- Driver: n.Driver,
- Options: n.DriverOpts,
- Internal: n.Internal,
- Attachable: n.Attachable,
- IPAM: ipam,
- EnableIPv6: n.EnableIPv6,
+ if inspect.Labels[api.NetworkLabel] != name {
+ return "", fmt.Errorf(
+ "network %s was found but has incorrect label %s set to %q (expected: %q)",
+ n.Name,
+ api.NetworkLabel,
+ inspect.Labels[api.NetworkLabel],
+ name,
+ )
}
- if n.Ipam.Driver != "" || len(n.Ipam.Config) > 0 {
- createOpts.IPAM = &network.IPAM{}
+ hash := inspect.Labels[api.ConfigHashLabel]
+ expected, err := NetworkHash(n)
+ if err != nil {
+ return "", err
}
-
- if n.Ipam.Driver != "" {
- createOpts.IPAM.Driver = n.Ipam.Driver
+ if hash == "" || hash == expected {
+ return inspect.ID, nil
}
- for _, ipamConfig := range n.Ipam.Config {
- config := network.IPAMConfig{
- Subnet: ipamConfig.Subnet,
- }
- createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
- }
- networkEventName := fmt.Sprintf("Network %s", n.Name)
- w := progress.ContextWriter(ctx)
- w.Event(progress.CreatingEvent(networkEventName))
- if _, err := s.apiClient().NetworkCreate(ctx, n.Name, createOpts); err != nil {
- w.Event(progress.ErrorEvent(networkEventName))
- return errors.Wrapf(err, "failed to create network %s", n.Name)
+ dangledContainers, err = s.removeDivergedNetwork(ctx, project, name, n)
+ if err != nil {
+ return "", err
}
- w.Event(progress.CreatedEvent(networkEventName))
- return nil
}
- return err
}
- return nil
+ // ignore other errors. Typically, an ambiguous request by name results in some generic `invalidParameter` error
+
+ // Either not found, or name is ambiguous - use NetworkList to list by name
+ networks, err := s.apiClient().NetworkList(ctx, network.ListOptions{
+ Filters: filters.NewArgs(filters.Arg("name", n.Name)),
+ })
+ if err != nil {
+ return "", err
+ }
+
+ // NetworkList matches all or part of a network name, so we have to filter for a strict match
+ networks = slices.DeleteFunc(networks, func(net network.Summary) bool {
+ return net.Name != n.Name
+ })
+
+ for _, net := range networks {
+ if net.Labels[api.ProjectLabel] == project.Name &&
+ net.Labels[api.NetworkLabel] == name {
+ return net.ID, nil
+ }
+ }
+
+ // we could have called NetworkList with a projectFilter and networkFilter, but not doing so lets us catch the
+ // scenario where a network with the same name exists but doesn't have the label, and where `CheckDuplicate: true`
+ // prevents us from creating another one.
+ if len(networks) > 0 {
+ logrus.Warnf("a network with name %s exists but was not created by compose.\n"+
+ "Set `external: true` to use an existing network", n.Name)
+ return networks[0].ID, nil
+ }
+
+ var ipam *network.IPAM
+ if n.Ipam.Config != nil {
+ var config []network.IPAMConfig
+ for _, pool := range n.Ipam.Config {
+ config = append(config, network.IPAMConfig{
+ Subnet: pool.Subnet,
+ IPRange: pool.IPRange,
+ Gateway: pool.Gateway,
+ AuxAddress: pool.AuxiliaryAddresses,
+ })
+ }
+ ipam = &network.IPAM{
+ Driver: n.Ipam.Driver,
+ Config: config,
+ }
+ }
+ hash, err := NetworkHash(n)
+ if err != nil {
+ return "", err
+ }
+ n.CustomLabels = n.CustomLabels.Add(api.ConfigHashLabel, hash)
+ createOpts := network.CreateOptions{
+ Labels: mergeLabels(n.Labels, n.CustomLabels),
+ Driver: n.Driver,
+ Options: n.DriverOpts,
+ Internal: n.Internal,
+ Attachable: n.Attachable,
+ IPAM: ipam,
+ EnableIPv6: n.EnableIPv6,
+ EnableIPv4: n.EnableIPv4,
+ }
+
+ if n.Ipam.Driver != "" || len(n.Ipam.Config) > 0 {
+ createOpts.IPAM = &network.IPAM{}
+ }
+
+ if n.Ipam.Driver != "" {
+ createOpts.IPAM.Driver = n.Ipam.Driver
+ }
+
+ for _, ipamConfig := range n.Ipam.Config {
+ config := network.IPAMConfig{
+ Subnet: ipamConfig.Subnet,
+ IPRange: ipamConfig.IPRange,
+ Gateway: ipamConfig.Gateway,
+ AuxAddress: ipamConfig.AuxiliaryAddresses,
+ }
+ createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
+ }
+
+ networkEventName := fmt.Sprintf("Network %s", n.Name)
+ s.events.On(creatingEvent(networkEventName))
+
+ resp, err := s.apiClient().NetworkCreate(ctx, n.Name, createOpts)
+ if err != nil {
+ s.events.On(errorEvent(networkEventName, err.Error()))
+ return "", fmt.Errorf("failed to create network %s: %w", n.Name, err)
+ }
+ s.events.On(createdEvent(networkEventName))
+
+ err = s.connectNetwork(ctx, n.Name, dangledContainers, nil)
+ if err != nil {
+ return "", err
+ }
+
+ return resp.ID, nil
+}
+
+func (s *composeService) removeDivergedNetwork(ctx context.Context, project *types.Project, name string, n *types.NetworkConfig) (Containers, error) {
+ // Remove services attached to this network to force recreation
+ var services []string
+ for _, service := range project.Services.Filter(func(config types.ServiceConfig) bool {
+ _, ok := config.Networks[name]
+ return ok
+ }) {
+ services = append(services, service.Name)
+ }
+
+ // Stop containers so we can remove network
+ // They will be restarted (actually: recreated) with the updated network
+ err := s.stop(ctx, project.Name, api.StopOptions{
+ Services: services,
+ Project: project,
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ containers, err := s.getContainers(ctx, project.Name, oneOffExclude, true, services...)
+ if err != nil {
+ return nil, err
+ }
+
+ err = s.disconnectNetwork(ctx, n.Name, containers)
+ if err != nil {
+ return nil, err
+ }
+
+ err = s.apiClient().NetworkRemove(ctx, n.Name)
+ eventName := fmt.Sprintf("Network %s", n.Name)
+ s.events.On(removedEvent(eventName))
+ return containers, err
}
-func (s *composeService) removeNetwork(ctx context.Context, network string, w progress.Writer) error {
- eventName := fmt.Sprintf("Network %s", network)
- w.Event(progress.RemovingEvent(eventName))
+func (s *composeService) disconnectNetwork(
+ ctx context.Context,
+ nwName string,
+ containers Containers,
+) error {
+ for _, c := range containers {
+ err := s.apiClient().NetworkDisconnect(ctx, nwName, c.ID, true)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
- if err := s.apiClient().NetworkRemove(ctx, network); err != nil {
- w.Event(progress.ErrorEvent(eventName))
- return errors.Wrapf(err, fmt.Sprintf("failed to remove network %s", network))
+func (s *composeService) connectNetwork(
+ ctx context.Context,
+ nwName string,
+ containers Containers,
+ config *network.EndpointSettings,
+) error {
+ for _, c := range containers {
+ err := s.apiClient().NetworkConnect(ctx, nwName, c.ID, config)
+ if err != nil {
+ return err
+ }
}
- w.Event(progress.RemovedEvent(eventName))
return nil
}
-func (s *composeService) ensureVolume(ctx context.Context, volume types.VolumeConfig, project string) error {
+func (s *composeService) resolveExternalNetwork(ctx context.Context, n *types.NetworkConfig) (string, error) {
+ // NetworkInspect will match on ID prefix, so NetworkList with a name
+ // filter is used to look for an exact match to prevent e.g. a network
+ // named `db` from getting erroneously matched to a network with an ID
+ // like `db9086999caf`
+ networks, err := s.apiClient().NetworkList(ctx, network.ListOptions{
+ Filters: filters.NewArgs(filters.Arg("name", n.Name)),
+ })
+ if err != nil {
+ return "", err
+ }
+
+ if len(networks) == 0 {
+ // in this instance, n.Name is really an ID
+ sn, err := s.apiClient().NetworkInspect(ctx, n.Name, network.InspectOptions{})
+ if err == nil {
+ networks = append(networks, sn)
+ } else if !errdefs.IsNotFound(err) {
+ return "", err
+ }
+
+ }
+
+ // The NetworkList API doesn't return only exact name matches, so a single request can return more than one network
+ networks = slices.DeleteFunc(networks, func(net network.Inspect) bool {
+ // this function is called during the rebuild stage of `compose watch`.
+ // we still require just one network back, but we need to run the search on the ID
+ return net.Name != n.Name && net.ID != n.Name
+ })
+
+ switch len(networks) {
+ case 1:
+ return networks[0].ID, nil
+ case 0:
+ enabled, err := s.isSWarmEnabled(ctx)
+ if err != nil {
+ return "", err
+ }
+ if enabled {
+ // Swarm nodes do not register overlay networks that were
+ // created on a different node unless they're in use.
+ // So we can't preemptively check network exists, but
+ // networkAttach will later fail anyway if network actually doesn't exist
+ return "swarm", nil
+ }
+ return "", fmt.Errorf("network %s declared as external, but could not be found", n.Name)
+ default:
+ return "", fmt.Errorf("multiple networks with name %q were found. Use network ID as `name` to avoid ambiguity", n.Name)
+ }
+}
+
+func (s *composeService) ensureVolume(ctx context.Context, name string, volume types.VolumeConfig, project *types.Project) (string, error) {
inspected, err := s.apiClient().VolumeInspect(ctx, volume.Name)
if err != nil {
if !errdefs.IsNotFound(err) {
- return err
+ return "", err
}
- if volume.External.External {
- return fmt.Errorf("external volume %q not found", volume.Name)
+ if volume.External {
+ return "", fmt.Errorf("external volume %q not found", volume.Name)
}
- err := s.createVolume(ctx, volume)
- return err
+ err = s.createVolume(ctx, volume)
+ return volume.Name, err
}
- if volume.External.External {
- return nil
+ if volume.External {
+ return volume.Name, nil
}
// Volume exists with name, but let's double-check this is the expected one
@@ -1113,26 +1550,90 @@ func (s *composeService) ensureVolume(ctx context.Context, volume types.VolumeCo
if !ok {
logrus.Warnf("volume %q already exists but was not created by Docker Compose. Use `external: true` to use an existing volume", volume.Name)
}
- if ok && p != project {
- logrus.Warnf("volume %q already exists but was not created for project %q. Use `external: true` to use an existing volume", volume.Name, p)
+ if ok && p != project.Name {
+ logrus.Warnf("volume %q already exists but was created for project %q (expected %q). Use `external: true` to use an existing volume", volume.Name, p, project.Name)
}
- return nil
+
+ expected, err := VolumeHash(volume)
+ if err != nil {
+ return "", err
+ }
+ actual, ok := inspected.Labels[api.ConfigHashLabel]
+ if ok && actual != expected {
+ msg := fmt.Sprintf("Volume %q exists but doesn't match configuration in compose file. Recreate (data will be lost)?", volume.Name)
+ confirm, err := s.prompt(msg, false)
+ if err != nil {
+ return "", err
+ }
+ if confirm {
+ err = s.removeDivergedVolume(ctx, name, volume, project)
+ if err != nil {
+ return "", err
+ }
+ return volume.Name, s.createVolume(ctx, volume)
+ }
+ }
+ return inspected.Name, nil
+}
+
+func (s *composeService) removeDivergedVolume(ctx context.Context, name string, volume types.VolumeConfig, project *types.Project) error {
+ // Remove services mounting divergent volume
+ var services []string
+ for _, service := range project.Services.Filter(func(config types.ServiceConfig) bool {
+ for _, cfg := range config.Volumes {
+ if cfg.Source == name {
+ return true
+ }
+ }
+ return false
+ }) {
+ services = append(services, service.Name)
+ }
+
+ err := s.stop(ctx, project.Name, api.StopOptions{
+ Services: services,
+ Project: project,
+ }, nil)
+ if err != nil {
+ return err
+ }
+
+ containers, err := s.getContainers(ctx, project.Name, oneOffExclude, true, services...)
+ if err != nil {
+ return err
+ }
+
+ // FIXME (ndeloof) we have to remove containers so we can recreate the volume,
+ // but doing so we can't inherit anonymous volumes from the previous instance
+ err = s.remove(ctx, containers, api.RemoveOptions{
+ Services: services,
+ Project: project,
+ })
+ if err != nil {
+ return err
+ }
+
+ return s.apiClient().VolumeRemove(ctx, volume.Name, true)
}
func (s *composeService) createVolume(ctx context.Context, volume types.VolumeConfig) error {
- eventName := fmt.Sprintf("Volume %q", volume.Name)
- w := progress.ContextWriter(ctx)
- w.Event(progress.CreatingEvent(eventName))
- _, err := s.apiClient().VolumeCreate(ctx, volume_api.VolumeCreateBody{
- Labels: volume.Labels,
+ eventName := fmt.Sprintf("Volume %s", volume.Name)
+ s.events.On(creatingEvent(eventName))
+ hash, err := VolumeHash(volume)
+ if err != nil {
+ return err
+ }
+ volume.CustomLabels.Add(api.ConfigHashLabel, hash)
+ _, err = s.apiClient().VolumeCreate(ctx, volumetypes.CreateOptions{
+ Labels: mergeLabels(volume.Labels, volume.CustomLabels),
Name: volume.Name,
Driver: volume.Driver,
DriverOpts: volume.DriverOpts,
})
if err != nil {
- w.Event(progress.ErrorEvent(eventName))
+ s.events.On(errorEvent(eventName, err.Error()))
return err
}
- w.Event(progress.CreatedEvent(eventName))
+ s.events.On(createdEvent(eventName))
return nil
}
diff --git a/pkg/compose/create_test.go b/pkg/compose/create_test.go
index ea2ec88ad47..d6470abe2a3 100644
--- a/pkg/compose/create_test.go
+++ b/pkg/compose/create_test.go
@@ -17,16 +17,24 @@
package compose
import (
+ "context"
"os"
"path/filepath"
"sort"
"testing"
- "github.com/compose-spec/compose-go/types"
- composetypes "github.com/compose-spec/compose-go/types"
- "github.com/docker/compose/v2/pkg/api"
- moby "github.com/docker/docker/api/types"
+ composeloader "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/image"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert/cmp"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/network"
+
+ composetypes "github.com/compose-spec/compose-go/v2/types"
mountTypes "github.com/docker/docker/api/types/mount"
+
"gotest.tools/v3/assert"
)
@@ -45,6 +53,18 @@ func TestBuildBindMount(t *testing.T) {
assert.Equal(t, mount.Type, mountTypes.TypeBind)
}
+func TestBuildNamedPipeMount(t *testing.T) {
+ project := composetypes.Project{}
+ volume := composetypes.ServiceVolumeConfig{
+ Type: composetypes.VolumeTypeNamedPipe,
+ Source: "\\\\.\\pipe\\docker_engine_windows",
+ Target: "\\\\.\\pipe\\docker_engine",
+ }
+ mount, err := buildMount(project, volume)
+ assert.NilError(t, err)
+ assert.Equal(t, mount.Type, mountTypes.TypeNamedPipe)
+}
+
func TestBuildVolumeMount(t *testing.T) {
project := composetypes.Project{
Name: "myProject",
@@ -66,17 +86,17 @@ func TestBuildVolumeMount(t *testing.T) {
}
func TestServiceImageName(t *testing.T) {
- assert.Equal(t, getImageName(types.ServiceConfig{Image: "myImage"}, "myProject"), "myImage")
- assert.Equal(t, getImageName(types.ServiceConfig{Name: "aService"}, "myProject"), "myProject_aService")
+ assert.Equal(t, api.GetImageNameOrDefault(composetypes.ServiceConfig{Image: "myImage"}, "myProject"), "myImage")
+ assert.Equal(t, api.GetImageNameOrDefault(composetypes.ServiceConfig{Name: "aService"}, "myProject"), "myProject-aService")
}
func TestPrepareNetworkLabels(t *testing.T) {
- project := types.Project{
+ project := composetypes.Project{
Name: "myProject",
- Networks: types.Networks(map[string]types.NetworkConfig{"skynet": {}}),
+ Networks: composetypes.Networks(map[string]composetypes.NetworkConfig{"skynet": {}}),
}
prepareNetworks(&project)
- assert.DeepEqual(t, project.Networks["skynet"].Labels, types.Labels(map[string]string{
+ assert.DeepEqual(t, project.Networks["skynet"].CustomLabels, composetypes.Labels(map[string]string{
"com.docker.compose.network": "skynet",
"com.docker.compose.project": "myProject",
"com.docker.compose.version": api.ComposeVersion,
@@ -86,8 +106,8 @@ func TestPrepareNetworkLabels(t *testing.T) {
func TestBuildContainerMountOptions(t *testing.T) {
project := composetypes.Project{
Name: "myProject",
- Services: []composetypes.ServiceConfig{
- {
+ Services: composetypes.Services{
+ "myService": {
Name: "myService",
Volumes: []composetypes.ServiceVolumeConfig{
{
@@ -98,6 +118,19 @@ func TestBuildContainerMountOptions(t *testing.T) {
Type: composetypes.VolumeTypeVolume,
Target: "/var/myvolume2",
},
+ {
+ Type: composetypes.VolumeTypeVolume,
+ Source: "myVolume3",
+ Target: "/var/myvolume3",
+ Volume: &composetypes.ServiceVolumeVolume{
+ Subpath: "etc",
+ },
+ },
+ {
+ Type: composetypes.VolumeTypeNamedPipe,
+ Source: "\\\\.\\pipe\\docker_engine_windows",
+ Target: "\\\\.\\pipe\\docker_engine",
+ },
},
},
},
@@ -111,8 +144,8 @@ func TestBuildContainerMountOptions(t *testing.T) {
}),
}
- inherit := &moby.Container{
- Mounts: []moby.MountPoint{
+ inherit := &container.Summary{
+ Mounts: []container.MountPoint{
{
Type: composetypes.VolumeTypeVolume,
Destination: "/var/myvolume1",
@@ -124,35 +157,41 @@ func TestBuildContainerMountOptions(t *testing.T) {
},
}
- mounts, err := buildContainerMountOptions(project, project.Services[0], moby.ImageInspect{}, inherit)
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ mock, cli := prepareMocks(mockCtrl)
+ s := composeService{
+ dockerCli: cli,
+ }
+ mock.EXPECT().ImageInspect(gomock.Any(), "myProject-myService").AnyTimes().Return(image.InspectResponse{}, nil)
+
+ mounts, err := s.buildContainerMountOptions(context.TODO(), project, project.Services["myService"], inherit)
sort.Slice(mounts, func(i, j int) bool {
return mounts[i].Target < mounts[j].Target
})
assert.NilError(t, err)
- assert.Assert(t, len(mounts) == 2)
+ assert.Assert(t, len(mounts) == 4)
assert.Equal(t, mounts[0].Target, "/var/myvolume1")
assert.Equal(t, mounts[1].Target, "/var/myvolume2")
+ assert.Equal(t, mounts[2].Target, "/var/myvolume3")
+ assert.Equal(t, mounts[2].VolumeOptions.Subpath, "etc")
+ assert.Equal(t, mounts[3].Target, "\\\\.\\pipe\\docker_engine")
- mounts, err = buildContainerMountOptions(project, project.Services[0], moby.ImageInspect{}, inherit)
+ mounts, err = s.buildContainerMountOptions(context.TODO(), project, project.Services["myService"], inherit)
sort.Slice(mounts, func(i, j int) bool {
return mounts[i].Target < mounts[j].Target
})
assert.NilError(t, err)
- assert.Assert(t, len(mounts) == 2)
+ assert.Assert(t, len(mounts) == 4)
assert.Equal(t, mounts[0].Target, "/var/myvolume1")
assert.Equal(t, mounts[1].Target, "/var/myvolume2")
+ assert.Equal(t, mounts[2].Target, "/var/myvolume3")
+ assert.Equal(t, mounts[2].VolumeOptions.Subpath, "etc")
+ assert.Equal(t, mounts[3].Target, "\\\\.\\pipe\\docker_engine")
}
-func TestGetBindMode(t *testing.T) {
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{}, false), "rw")
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{}, true), "ro")
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{SELinux: composetypes.SELinuxShared}, false), "rw,z")
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{SELinux: composetypes.SELinuxPrivate}, false), "rw,Z")
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{SELinux: composetypes.SELinuxShared}, true), "ro,z")
- assert.Equal(t, getBindMode(&composetypes.ServiceVolumeBind{SELinux: composetypes.SELinuxPrivate}, true), "ro,Z")
-}
-
-func TestGetDefaultNetworkMode(t *testing.T) {
+func TestDefaultNetworkSettings(t *testing.T) {
t.Run("returns the network with the highest priority when service has multiple networks", func(t *testing.T) {
service := composetypes.ServiceConfig{
Name: "myService",
@@ -167,8 +206,8 @@ func TestGetDefaultNetworkMode(t *testing.T) {
}
project := composetypes.Project{
Name: "myProject",
- Services: []composetypes.ServiceConfig{
- service,
+ Services: composetypes.Services{
+ "myService": service,
},
Networks: composetypes.Networks(map[string]composetypes.NetworkConfig{
"myNetwork1": {
@@ -180,7 +219,11 @@ func TestGetDefaultNetworkMode(t *testing.T) {
}),
}
- assert.Equal(t, getDefaultNetworkMode(&project, service), "myProject_myNetwork2")
+ networkMode, networkConfig, err := defaultNetworkSettings(&project, service, 1, nil, true, "1.43")
+ assert.NilError(t, err)
+ assert.Equal(t, string(networkMode), "myProject_myNetwork2")
+ assert.Check(t, cmp.Len(networkConfig.EndpointsConfig, 1))
+ assert.Check(t, cmp.Contains(networkConfig.EndpointsConfig, "myProject_myNetwork2"))
})
t.Run("returns default network when service has no networks", func(t *testing.T) {
@@ -189,8 +232,8 @@ func TestGetDefaultNetworkMode(t *testing.T) {
}
project := composetypes.Project{
Name: "myProject",
- Services: []composetypes.ServiceConfig{
- service,
+ Services: composetypes.Services{
+ "myService": service,
},
Networks: composetypes.Networks(map[string]composetypes.NetworkConfig{
"myNetwork1": {
@@ -205,7 +248,11 @@ func TestGetDefaultNetworkMode(t *testing.T) {
}),
}
- assert.Equal(t, getDefaultNetworkMode(&project, service), "myProject_default")
+ networkMode, networkConfig, err := defaultNetworkSettings(&project, service, 1, nil, true, "1.43")
+ assert.NilError(t, err)
+ assert.Equal(t, string(networkMode), "myProject_default")
+ assert.Check(t, cmp.Len(networkConfig.EndpointsConfig, 1))
+ assert.Check(t, cmp.Contains(networkConfig.EndpointsConfig, "myProject_default"))
})
t.Run("returns none if project has no networks", func(t *testing.T) {
@@ -214,11 +261,199 @@ func TestGetDefaultNetworkMode(t *testing.T) {
}
project := composetypes.Project{
Name: "myProject",
- Services: []composetypes.ServiceConfig{
- service,
+ Services: composetypes.Services{
+ "myService": service,
},
}
- assert.Equal(t, getDefaultNetworkMode(&project, service), "none")
+ networkMode, networkConfig, err := defaultNetworkSettings(&project, service, 1, nil, true, "1.43")
+ assert.NilError(t, err)
+ assert.Equal(t, string(networkMode), "none")
+ assert.Check(t, cmp.Nil(networkConfig))
})
+
+ t.Run("returns defined network mode if explicitly set", func(t *testing.T) {
+ service := composetypes.ServiceConfig{
+ Name: "myService",
+ NetworkMode: "host",
+ }
+ project := composetypes.Project{
+ Name: "myProject",
+ Services: composetypes.Services{"myService": service},
+ Networks: composetypes.Networks(map[string]composetypes.NetworkConfig{
+ "default": {
+ Name: "myProject_default",
+ },
+ }),
+ }
+
+ networkMode, networkConfig, err := defaultNetworkSettings(&project, service, 1, nil, true, "1.43")
+ assert.NilError(t, err)
+ assert.Equal(t, string(networkMode), "host")
+ assert.Check(t, cmp.Nil(networkConfig))
+ })
+}
+
+func TestCreateEndpointSettings(t *testing.T) {
+ eps := createEndpointSettings(&composetypes.Project{
+ Name: "projName",
+ }, composetypes.ServiceConfig{
+ Name: "serviceName",
+ ContainerName: "containerName",
+ Networks: map[string]*composetypes.ServiceNetworkConfig{
+ "netName": {
+ Priority: 100,
+ Aliases: []string{"alias1", "alias2"},
+ Ipv4Address: "10.16.17.18",
+ Ipv6Address: "fdb4:7a7f:373a:3f0c::42",
+ LinkLocalIPs: []string{"169.254.10.20"},
+ MacAddress: "10:00:00:00:01",
+ DriverOpts: composetypes.Options{
+ "driverOpt1": "optval1",
+ "driverOpt2": "optval2",
+ },
+ },
+ },
+ }, 0, "netName", []string{"link1", "link2"}, true)
+ assert.Check(t, cmp.DeepEqual(eps, &network.EndpointSettings{
+ IPAMConfig: &network.EndpointIPAMConfig{
+ IPv4Address: "10.16.17.18",
+ IPv6Address: "fdb4:7a7f:373a:3f0c::42",
+ LinkLocalIPs: []string{"169.254.10.20"},
+ },
+ Links: []string{"link1", "link2"},
+ Aliases: []string{"containerName", "serviceName", "alias1", "alias2"},
+ MacAddress: "10:00:00:00:01",
+ DriverOpts: map[string]string{
+ "driverOpt1": "optval1",
+ "driverOpt2": "optval2",
+ },
+
+ // FIXME(robmry) - IPAddress and IPv6Gateway are "operational data" fields...
+ // - The IPv6 address here is the container's address, not the gateway.
+ // - Both fields will be cleared by the daemon, but they could be removed from
+ // the request.
+ IPAddress: "10.16.17.18",
+ IPv6Gateway: "fdb4:7a7f:373a:3f0c::42",
+ }))
+}
+
+func Test_buildContainerVolumes(t *testing.T) {
+ pwd, err := os.Getwd()
+ assert.NilError(t, err)
+
+ tests := []struct {
+ name string
+ yaml string
+ binds []string
+ mounts []mountTypes.Mount
+ }{
+ {
+ name: "bind mount local path",
+ yaml: `
+services:
+ test:
+ volumes:
+ - ./data:/data
+`,
+ binds: []string{filepath.Join(pwd, "data") + ":/data:rw"},
+ mounts: nil,
+ },
+ {
+ name: "bind mount, not create host path",
+ yaml: `
+services:
+ test:
+ volumes:
+ - type: bind
+ source: ./data
+ target: /data
+ bind:
+ create_host_path: false
+`,
+ binds: nil,
+ mounts: []mountTypes.Mount{
+ {
+ Type: "bind",
+ Source: filepath.Join(pwd, "data"),
+ Target: "/data",
+ BindOptions: &mountTypes.BindOptions{CreateMountpoint: false},
+ },
+ },
+ },
+ {
+ name: "mount volume",
+ yaml: `
+services:
+ test:
+ volumes:
+ - data:/data
+volumes:
+ data:
+ name: my_volume
+`,
+ binds: []string{"my_volume:/data:rw"},
+ mounts: nil,
+ },
+ {
+ name: "mount volume, readonly",
+ yaml: `
+services:
+ test:
+ volumes:
+ - data:/data:ro
+volumes:
+ data:
+ name: my_volume
+`,
+ binds: []string{"my_volume:/data:ro"},
+ mounts: nil,
+ },
+ {
+ name: "mount volume subpath",
+ yaml: `
+services:
+ test:
+ volumes:
+ - type: volume
+ source: data
+ target: /data
+ volume:
+ subpath: test/
+volumes:
+ data:
+ name: my_volume
+`,
+ binds: nil,
+ mounts: []mountTypes.Mount{
+ {
+ Type: "volume",
+ Source: "my_volume",
+ Target: "/data",
+ VolumeOptions: &mountTypes.VolumeOptions{Subpath: "test/"},
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p, err := composeloader.LoadWithContext(context.TODO(), composetypes.ConfigDetails{
+ ConfigFiles: []composetypes.ConfigFile{
+ {
+ Filename: "test",
+ Content: []byte(tt.yaml),
+ },
+ },
+ }, func(options *composeloader.Options) {
+ options.SkipValidation = true
+ options.SkipConsistencyCheck = true
+ })
+ assert.NilError(t, err)
+ s := &composeService{}
+ binds, mounts, err := s.buildContainerVolumes(context.TODO(), *p, p.Services["test"], nil)
+ assert.NilError(t, err)
+ assert.DeepEqual(t, tt.binds, binds)
+ assert.DeepEqual(t, tt.mounts, mounts)
+ })
+ }
}
diff --git a/pkg/compose/dependencies.go b/pkg/compose/dependencies.go
index 2147862b845..c5d3b35da94 100644
--- a/pkg/compose/dependencies.go
+++ b/pkg/compose/dependencies.go
@@ -19,13 +19,13 @@ package compose
import (
"context"
"fmt"
+ "slices"
"strings"
"sync"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
"golang.org/x/sync/errgroup"
-
- "github.com/docker/compose/v2/pkg/utils"
)
// ServiceStatus indicates the status of a service
@@ -37,80 +37,170 @@ const (
ServiceStarted
)
-type graphTraversalConfig struct {
+type graphTraversal struct {
+ mu sync.Mutex
+ seen map[string]struct{}
+ ignored map[string]struct{}
+
extremityNodesFn func(*Graph) []*Vertex // leaves or roots
adjacentNodesFn func(*Vertex) []*Vertex // getParents or getChildren
filterAdjacentByStatusFn func(*Graph, string, ServiceStatus) []*Vertex // filterChildren or filterParents
targetServiceStatus ServiceStatus
adjacentServiceStatusToSkip ServiceStatus
+
+ visitorFn func(context.Context, string) error
+ maxConcurrency int
}
-var (
- upDirectionTraversalConfig = graphTraversalConfig{
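+// upDirectionTraversal visits leaves (services without dependencies) first and walks up to their
+// parents, so a service is only started once all of its dependencies have been started.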
+func upDirectionTraversal(visitorFn func(context.Context, string) error) *graphTraversal {
+ return &graphTraversal{
extremityNodesFn: leaves,
adjacentNodesFn: getParents,
filterAdjacentByStatusFn: filterChildren,
adjacentServiceStatusToSkip: ServiceStopped,
targetServiceStatus: ServiceStarted,
+ visitorFn: visitorFn,
}
- downDirectionTraversalConfig = graphTraversalConfig{
+}
+
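+// downDirectionTraversal visits roots (services nothing depends on) first and walks down to their
+// children, so a service is only stopped once everything that depends on it has been stopped.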
+func downDirectionTraversal(visitorFn func(context.Context, string) error) *graphTraversal {
+ return &graphTraversal{
extremityNodesFn: roots,
adjacentNodesFn: getChildren,
filterAdjacentByStatusFn: filterParents,
adjacentServiceStatusToSkip: ServiceStarted,
targetServiceStatus: ServiceStopped,
+ visitorFn: visitorFn,
}
-)
+}
// InDependencyOrder applies the function to the services of the project taking in account the dependency order
-func InDependencyOrder(ctx context.Context, project *types.Project, fn func(context.Context, string) error) error {
- return visit(ctx, project, upDirectionTraversalConfig, fn, ServiceStopped)
+func InDependencyOrder(ctx context.Context, project *types.Project, fn func(context.Context, string) error, options ...func(*graphTraversal)) error {
+ graph, err := NewGraph(project, ServiceStopped)
+ if err != nil {
+ return err
+ }
+ t := upDirectionTraversal(fn)
+ for _, option := range options {
+ option(t)
+ }
+ return t.visit(ctx, graph)
}
// InReverseDependencyOrder applies the function to the services of the project in reverse order of dependencies
-func InReverseDependencyOrder(ctx context.Context, project *types.Project, fn func(context.Context, string) error) error {
- return visit(ctx, project, downDirectionTraversalConfig, fn, ServiceStarted)
+func InReverseDependencyOrder(ctx context.Context, project *types.Project, fn func(context.Context, string) error, options ...func(*graphTraversal)) error {
+ graph, err := NewGraph(project, ServiceStarted)
+ if err != nil {
+ return err
+ }
+ t := downDirectionTraversal(fn)
+ for _, option := range options {
+ option(t)
+ }
+ return t.visit(ctx, graph)
}
-func visit(ctx context.Context, project *types.Project, traversalConfig graphTraversalConfig, fn func(context.Context, string) error, initialStatus ServiceStatus) error {
- g := NewGraph(project.Services, initialStatus)
- if b, err := g.HasCycles(); b {
- return err
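+// WithRootNodesAndDown restricts a traversal to the given nodes and their ancestors (dependent
+// services); the remaining vertices are still walked for bookkeeping, but the visitor is not invoked on them.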
+func WithRootNodesAndDown(nodes []string) func(*graphTraversal) {
+ return func(t *graphTraversal) {
+ if len(nodes) == 0 {
+ return
+ }
+ originalFn := t.extremityNodesFn
+ t.extremityNodesFn = func(graph *Graph) []*Vertex {
+ var want []string
+ for _, node := range nodes {
+ vertex := graph.Vertices[node]
+ want = append(want, vertex.Service)
+ for _, v := range getAncestors(vertex) {
+ want = append(want, v.Service)
+ }
+ }
+
+ t.ignored = map[string]struct{}{}
+ for k := range graph.Vertices {
+ if !slices.Contains(want, k) {
+ t.ignored[k] = struct{}{}
+ }
+ }
+
+ return originalFn(graph)
+ }
}
+}
- nodes := traversalConfig.extremityNodesFn(g)
+func (t *graphTraversal) visit(ctx context.Context, g *Graph) error {
+ expect := len(g.Vertices)
+ if expect == 0 {
+ return nil
+ }
- eg, _ := errgroup.WithContext(ctx)
+ eg, ctx := errgroup.WithContext(ctx)
+ if t.maxConcurrency > 0 {
+ eg.SetLimit(t.maxConcurrency + 1)
+ }
+ nodeCh := make(chan *Vertex, expect)
+ defer close(nodeCh)
+ // nodeCh needs to allow n=expect writers, as the reader goroutine may already have returned after ctx.Done
eg.Go(func() error {
- return run(ctx, g, eg, nodes, traversalConfig, fn)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case node := <-nodeCh:
+ expect--
+ if expect == 0 {
+ return nil
+ }
+ t.run(ctx, g, eg, t.adjacentNodesFn(node), nodeCh)
+ }
+ }
})
+ nodes := t.extremityNodesFn(g)
+ t.run(ctx, g, eg, nodes, nodeCh)
+
return eg.Wait()
}
// Note: this could be `graph.walk` or whatever
-func run(ctx context.Context, graph *Graph, eg *errgroup.Group, nodes []*Vertex, traversalConfig graphTraversalConfig, fn func(context.Context, string) error) error {
+func (t *graphTraversal) run(ctx context.Context, graph *Graph, eg *errgroup.Group, nodes []*Vertex, nodeCh chan *Vertex) {
for _, node := range nodes {
 // Don't start this service yet if any of its children have
 // not been started yet.
- if len(traversalConfig.filterAdjacentByStatusFn(graph, node.Key, traversalConfig.adjacentServiceStatusToSkip)) != 0 {
+ if len(t.filterAdjacentByStatusFn(graph, node.Key, t.adjacentServiceStatusToSkip)) != 0 {
+ continue
+ }
+
+ if !t.consume(node.Key) {
+ // another worker already visited this node
continue
}
- node := node
eg.Go(func() error {
- err := fn(ctx, node.Service)
- if err != nil {
- return err
+ var err error
+ if _, ignore := t.ignored[node.Service]; !ignore {
+ err = t.visitorFn(ctx, node.Service)
}
-
- graph.UpdateStatus(node.Key, traversalConfig.targetServiceStatus)
-
- return run(ctx, graph, eg, traversalConfig.adjacentNodesFn(node), traversalConfig, fn)
+ if err == nil {
+ graph.UpdateStatus(node.Key, t.targetServiceStatus)
+ }
+ nodeCh <- node
+ return err
})
}
+}
- return nil
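+// consume marks the node as seen and reports whether this worker is the first to do so;
+// subsequent callers get false and skip the node.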
+func (t *graphTraversal) consume(nodeKey string) bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.seen == nil {
+ t.seen = make(map[string]struct{})
+ }
+ if _, ok := t.seen[nodeKey]; ok {
+ return false
+ }
+ t.seen[nodeKey] = struct{}{}
+ return true
}
// Graph represents project as service dependencies
@@ -132,7 +222,7 @@ func getParents(v *Vertex) []*Vertex {
return v.GetParents()
}
-// GetParents returns a slice with the parent vertexes of the a Vertex
+// GetParents returns a slice with the parent vertices of the Vertex
func (v *Vertex) GetParents() []*Vertex {
var res []*Vertex
for _, p := range v.Parents {
@@ -145,7 +235,17 @@ func getChildren(v *Vertex) []*Vertex {
return v.GetChildren()
}
-// GetChildren returns a slice with the child vertexes of the a Vertex
+// getAncestors returns all ancestors of a vertex; the result may contain duplicates
+func getAncestors(v *Vertex) []*Vertex {
+ var ancestors []*Vertex
+ for _, parent := range v.GetParents() {
+ ancestors = append(ancestors, parent)
+ ancestors = append(ancestors, getAncestors(parent)...)
+ }
+ return ancestors
+}
+
+// GetChildren returns a slice with the child vertices of the Vertex
func (v *Vertex) GetChildren() []*Vertex {
var res []*Vertex
for _, p := range v.Children {
@@ -155,23 +255,41 @@ func (v *Vertex) GetChildren() []*Vertex {
}
// NewGraph returns the dependency graph of the services
-func NewGraph(services types.Services, initialStatus ServiceStatus) *Graph {
+func NewGraph(project *types.Project, initialStatus ServiceStatus) (*Graph, error) {
graph := &Graph{
lock: sync.RWMutex{},
Vertices: map[string]*Vertex{},
}
- for _, s := range services {
+ for _, s := range project.Services {
graph.AddVertex(s.Name, s.Name, initialStatus)
}
- for _, s := range services {
+ for index, s := range project.Services {
for _, name := range s.GetDependencies() {
- _ = graph.AddEdge(s.Name, name)
+ err := graph.AddEdge(s.Name, name)
+ if err != nil {
+ if !s.DependsOn[name].Required {
+ delete(s.DependsOn, name)
+ project.Services[index] = s
+ continue
+ }
+ if api.IsNotFoundError(err) {
+ ds, err := project.GetDisabledService(name)
+ if err == nil {
+ return nil, fmt.Errorf("service %s is required by %s but is disabled. Can be enabled by profiles %s", name, s.Name, ds.Profiles)
+ }
+ }
+ return nil, err
+ }
}
}
- return graph
+ if b, err := graph.HasCycles(); b {
+ return nil, err
+ }
+
+ return graph, nil
}
// NewVertex is the constructor function for the Vertex
@@ -194,7 +312,7 @@ func (g *Graph) AddVertex(key string, service string, initialStatus ServiceStatu
g.Vertices[key] = v
}
-// AddEdge adds a relationship of dependency between vertexes `source` and `destination`
+// AddEdge adds a relationship of dependency between vertices `source` and `destination`
func (g *Graph) AddEdge(source string, destination string) error {
g.lock.Lock()
defer g.lock.Unlock()
@@ -203,10 +321,10 @@ func (g *Graph) AddEdge(source string, destination string) error {
destinationVertex := g.Vertices[destination]
if sourceVertex == nil {
- return fmt.Errorf("could not find %s", source)
+ return fmt.Errorf("could not find %s: %w", source, api.ErrNotFound)
}
if destinationVertex == nil {
- return fmt.Errorf("could not find %s", destination)
+ return fmt.Errorf("could not find %s: %w", destination, api.ErrNotFound)
}
// If they are already connected
@@ -315,10 +433,9 @@ func (g *Graph) HasCycles() (bool, error) {
path := []string{
vertex.Key,
}
- if !utils.StringContains(discovered, vertex.Key) && !utils.StringContains(finished, vertex.Key) {
+ if !slices.Contains(discovered, vertex.Key) && !slices.Contains(finished, vertex.Key) {
var err error
discovered, finished, err = g.visit(vertex.Key, path, discovered, finished)
-
if err != nil {
return true, err
}
@@ -333,11 +450,11 @@ func (g *Graph) visit(key string, path []string, discovered []string, finished [
for _, v := range g.Vertices[key].Children {
path := append(path, v.Key)
- if utils.StringContains(discovered, v.Key) {
+ if slices.Contains(discovered, v.Key) {
return nil, nil, fmt.Errorf("cycle found: %s", strings.Join(path, " -> "))
}
- if !utils.StringContains(finished, v.Key) {
+ if !slices.Contains(finished, v.Key) {
if _, _, err := g.visit(v.Key, path, discovered, finished); err != nil {
return nil, nil, err
}
diff --git a/pkg/compose/dependencies_test.go b/pkg/compose/dependencies_test.go
index 5d5871a5470..904d975fb3f 100644
--- a/pkg/compose/dependencies_test.go
+++ b/pkg/compose/dependencies_test.go
@@ -18,52 +18,420 @@ package compose
import (
"context"
+ "fmt"
+ "sort"
+ "sync"
"testing"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/utils"
+ testify "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"gotest.tools/v3/assert"
)
-var project = types.Project{
- Services: []types.ServiceConfig{
- {
- Name: "test1",
- DependsOn: map[string]types.ServiceDependency{
- "test2": {},
+func createTestProject() *types.Project {
+ return &types.Project{
+ Services: types.Services{
+ "test1": {
+ Name: "test1",
+ DependsOn: map[string]types.ServiceDependency{
+ "test2": {},
+ },
},
- },
- {
- Name: "test2",
- DependsOn: map[string]types.ServiceDependency{
- "test3": {},
+ "test2": {
+ Name: "test2",
+ DependsOn: map[string]types.ServiceDependency{
+ "test3": {},
+ },
+ },
+ "test3": {
+ Name: "test3",
},
},
- {
- Name: "test3",
- },
- },
+ }
+}
+
+func TestTraversalWithMultipleParents(t *testing.T) {
+ dependent := types.ServiceConfig{
+ Name: "dependent",
+ DependsOn: make(types.DependsOnConfig),
+ }
+
+ project := types.Project{
+ Services: types.Services{"dependent": dependent},
+ }
+
+ for i := 1; i <= 100; i++ {
+ name := fmt.Sprintf("svc_%d", i)
+ dependent.DependsOn[name] = types.ServiceDependency{}
+
+ svc := types.ServiceConfig{Name: name}
+ project.Services[name] = svc
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ svc := make(chan string, 10)
+ seen := make(map[string]int)
+ done := make(chan struct{})
+ go func() {
+ for service := range svc {
+ seen[service]++
+ }
+ done <- struct{}{}
+ }()
+
+ err := InDependencyOrder(ctx, &project, func(ctx context.Context, service string) error {
+ svc <- service
+ return nil
+ })
+ require.NoError(t, err, "Error during iteration")
+ close(svc)
+ <-done
+
+ testify.Len(t, seen, 101)
+ for svc, count := range seen {
+ assert.Equal(t, 1, count, "Service: %s", svc)
+ }
}
func TestInDependencyUpCommandOrder(t *testing.T) {
- order := make(chan string)
- //nolint:errcheck, unparam
- go InDependencyOrder(context.TODO(), &project, func(ctx context.Context, config string) error {
- order <- config
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ var order []string
+ err := InDependencyOrder(ctx, createTestProject(), func(ctx context.Context, service string) error {
+ order = append(order, service)
return nil
})
- assert.Equal(t, <-order, "test3")
- assert.Equal(t, <-order, "test2")
- assert.Equal(t, <-order, "test1")
+ require.NoError(t, err, "Error during iteration")
+ require.Equal(t, []string{"test3", "test2", "test1"}, order)
}
func TestInDependencyReverseDownCommandOrder(t *testing.T) {
- order := make(chan string)
- //nolint:errcheck, unparam
- go InReverseDependencyOrder(context.TODO(), &project, func(ctx context.Context, config string) error {
- order <- config
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ var order []string
+ err := InReverseDependencyOrder(ctx, createTestProject(), func(ctx context.Context, service string) error {
+ order = append(order, service)
return nil
})
- assert.Equal(t, <-order, "test1")
- assert.Equal(t, <-order, "test2")
- assert.Equal(t, <-order, "test3")
+ require.NoError(t, err, "Error during iteration")
+ require.Equal(t, []string{"test1", "test2", "test3"}, order)
+}
+
+func TestBuildGraph(t *testing.T) {
+ testCases := []struct {
+ desc string
+ services types.Services
+ expectedVertices map[string]*Vertex
+ }{
+ {
+ desc: "builds graph with single service",
+ services: types.Services{
+ "test": {
+ Name: "test",
+ DependsOn: types.DependsOnConfig{},
+ },
+ },
+ expectedVertices: map[string]*Vertex{
+ "test": {
+ Key: "test",
+ Service: "test",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{},
+ },
+ },
+ },
+ {
+ desc: "builds graph with two separate services",
+ services: types.Services{
+ "test": {
+ Name: "test",
+ DependsOn: types.DependsOnConfig{},
+ },
+ "another": {
+ Name: "another",
+ DependsOn: types.DependsOnConfig{},
+ },
+ },
+ expectedVertices: map[string]*Vertex{
+ "test": {
+ Key: "test",
+ Service: "test",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{},
+ },
+ "another": {
+ Key: "another",
+ Service: "another",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{},
+ },
+ },
+ },
+ {
+ desc: "builds graph with a service and a dependency",
+ services: types.Services{
+ "test": {
+ Name: "test",
+ DependsOn: types.DependsOnConfig{
+ "another": types.ServiceDependency{},
+ },
+ },
+ "another": {
+ Name: "another",
+ DependsOn: types.DependsOnConfig{},
+ },
+ },
+ expectedVertices: map[string]*Vertex{
+ "test": {
+ Key: "test",
+ Service: "test",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{
+ "another": {},
+ },
+ Parents: map[string]*Vertex{},
+ },
+ "another": {
+ Key: "another",
+ Service: "another",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{
+ "test": {},
+ },
+ },
+ },
+ },
+ {
+ desc: "builds graph with multiple dependency levels",
+ services: types.Services{
+ "test": {
+ Name: "test",
+ DependsOn: types.DependsOnConfig{
+ "another": types.ServiceDependency{},
+ },
+ },
+ "another": {
+ Name: "another",
+ DependsOn: types.DependsOnConfig{
+ "another_dep": types.ServiceDependency{},
+ },
+ },
+ "another_dep": {
+ Name: "another_dep",
+ DependsOn: types.DependsOnConfig{},
+ },
+ },
+ expectedVertices: map[string]*Vertex{
+ "test": {
+ Key: "test",
+ Service: "test",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{
+ "another": {},
+ },
+ Parents: map[string]*Vertex{},
+ },
+ "another": {
+ Key: "another",
+ Service: "another",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{
+ "another_dep": {},
+ },
+ Parents: map[string]*Vertex{
+ "test": {},
+ },
+ },
+ "another_dep": {
+ Key: "another_dep",
+ Service: "another_dep",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{
+ "another": {},
+ },
+ },
+ },
+ },
+ }
+ for _, tC := range testCases {
+ t.Run(tC.desc, func(t *testing.T) {
+ project := types.Project{
+ Services: tC.services,
+ }
+
+ graph, err := NewGraph(&project, ServiceStopped)
+ assert.NilError(t, err, fmt.Sprintf("failed to build graph for: %s", tC.desc))
+
+ for k, vertex := range graph.Vertices {
+ expected, ok := tC.expectedVertices[k]
+ assert.Equal(t, true, ok)
+ assert.Equal(t, true, isVertexEqual(*expected, *vertex))
+ }
+ })
+ }
+}
+
+func TestBuildGraphDependsOn(t *testing.T) {
+ testCases := []struct {
+ desc string
+ services types.Services
+ expectedVertices map[string]*Vertex
+ }{
+ {
+ desc: "service depends on init container which is already removed",
+ services: types.Services{
+ "test": {
+ Name: "test",
+ DependsOn: types.DependsOnConfig{
+ "test-removed-init-container": types.ServiceDependency{
+ Condition: "service_completed_successfully",
+ Restart: false,
+ Extensions: types.Extensions(nil),
+ Required: false,
+ },
+ },
+ },
+ },
+ expectedVertices: map[string]*Vertex{
+ "test": {
+ Key: "test",
+ Service: "test",
+ Status: ServiceStopped,
+ Children: map[string]*Vertex{},
+ Parents: map[string]*Vertex{},
+ },
+ },
+ },
+ }
+ for _, tC := range testCases {
+ t.Run(tC.desc, func(t *testing.T) {
+ project := types.Project{
+ Services: tC.services,
+ }
+
+ graph, err := NewGraph(&project, ServiceStopped)
+ assert.NilError(t, err, fmt.Sprintf("failed to build graph for: %s", tC.desc))
+
+ for k, vertex := range graph.Vertices {
+ expected, ok := tC.expectedVertices[k]
+ assert.Equal(t, true, ok)
+ assert.Equal(t, true, isVertexEqual(*expected, *vertex))
+ }
+ })
+ }
+}
+
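+// isVertexEqual reports whether two vertices share the same key and service name and whether every
+// child and parent key of a is also present in b; vertex status and nested values are ignored.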
+func isVertexEqual(a, b Vertex) bool {
+ childrenEquality := true
+ for c := range a.Children {
+ if _, ok := b.Children[c]; !ok {
+ childrenEquality = false
+ }
+ }
+ parentEquality := true
+ for p := range a.Parents {
+ if _, ok := b.Parents[p]; !ok {
+ parentEquality = false
+ }
+ }
+ return a.Key == b.Key &&
+ a.Service == b.Service &&
+ childrenEquality &&
+ parentEquality
+}
+
+func TestWith_RootNodesAndDown(t *testing.T) {
+ graph := &Graph{
+ lock: sync.RWMutex{},
+ Vertices: map[string]*Vertex{},
+ }
+
+ /** graph topology:
+ A B
+ / \ / \
+ G C E
+ \ /
+ D
+ |
+ F
+ */
+
+ graph.AddVertex("A", "A", 0)
+ graph.AddVertex("B", "B", 0)
+ graph.AddVertex("C", "C", 0)
+ graph.AddVertex("D", "D", 0)
+ graph.AddVertex("E", "E", 0)
+ graph.AddVertex("F", "F", 0)
+ graph.AddVertex("G", "G", 0)
+
+ _ = graph.AddEdge("C", "A")
+ _ = graph.AddEdge("C", "B")
+ _ = graph.AddEdge("E", "B")
+ _ = graph.AddEdge("D", "C")
+ _ = graph.AddEdge("D", "E")
+ _ = graph.AddEdge("F", "D")
+ _ = graph.AddEdge("G", "A")
+
+ tests := []struct {
+ name string
+ nodes []string
+ want []string
+ }{
+ {
+ name: "whole graph",
+ nodes: []string{"A", "B"},
+ want: []string{"A", "B", "C", "D", "E", "F", "G"},
+ },
+ {
+ name: "only leaves",
+ nodes: []string{"F", "G"},
+ want: []string{"F", "G"},
+ },
+ {
+ name: "simple dependent",
+ nodes: []string{"D"},
+ want: []string{"D", "F"},
+ },
+ {
+ name: "diamond dependents",
+ nodes: []string{"B"},
+ want: []string{"B", "C", "D", "E", "F"},
+ },
+ {
+ name: "partial graph",
+ nodes: []string{"A"},
+ want: []string{"A", "C", "D", "F", "G"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mx := sync.Mutex{}
+ expected := utils.Set[string]{}
+ expected.AddAll("C", "G", "D", "F")
+ var visited []string
+
+ gt := downDirectionTraversal(func(ctx context.Context, s string) error {
+ mx.Lock()
+ defer mx.Unlock()
+ visited = append(visited, s)
+ return nil
+ })
+ WithRootNodesAndDown(tt.nodes)(gt)
+ err := gt.visit(context.TODO(), graph)
+ assert.NilError(t, err)
+ sort.Strings(visited)
+ assert.DeepEqual(t, tt.want, visited)
+ })
+ }
}
diff --git a/pkg/compose/desktop.go b/pkg/compose/desktop.go
new file mode 100644
index 00000000000..9b985407b23
--- /dev/null
+++ b/pkg/compose/desktop.go
@@ -0,0 +1,41 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strings"
+)
+
+// engineLabelDesktopAddress is used to detect that Compose is running with a
+// Docker Desktop context. When this label is present, the value is an endpoint
+// address for an in-memory socket (AF_UNIX or named pipe).
+const engineLabelDesktopAddress = "com.docker.desktop.address"
+
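+// isDesktopIntegrationActive reports whether the connected engine advertises the Docker Desktop
+// address label.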
+func (s *composeService) isDesktopIntegrationActive(ctx context.Context) (bool, error) {
+ info, err := s.apiClient().Info(ctx)
+ if err != nil {
+ return false, err
+ }
+ for _, l := range info.Labels {
+ k, _, ok := strings.Cut(l, "=")
+ if ok && k == engineLabelDesktopAddress {
+ return true, nil
+ }
+ }
+ return false, nil
+}
diff --git a/pkg/compose/docker_cli_providers.go b/pkg/compose/docker_cli_providers.go
new file mode 100644
index 00000000000..207fa3e37a7
--- /dev/null
+++ b/pkg/compose/docker_cli_providers.go
@@ -0,0 +1,38 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "github.com/docker/cli/cli/command"
+)
+
+// dockerCliContextInfo implements api.ContextInfo using Docker CLI
+type dockerCliContextInfo struct {
+ cli command.Cli
+}
+
+func (c *dockerCliContextInfo) CurrentContext() string {
+ return c.cli.CurrentContext()
+}
+
+func (c *dockerCliContextInfo) ServerOSType() string {
+ return c.cli.ServerInfo().OSType
+}
+
+func (c *dockerCliContextInfo) BuildKitEnabled() (bool, error) {
+ return c.cli.BuildKitEnabled()
+}
diff --git a/pkg/compose/down.go b/pkg/compose/down.go
index 28d439010ff..4d57bcfed03 100644
--- a/pkg/compose/down.go
+++ b/pkg/compose/down.go
@@ -22,30 +22,34 @@ import (
"strings"
"time"
- "github.com/compose-spec/compose-go/types"
- "github.com/docker/cli/cli/registry/client"
- moby "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
+ containerType "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ imageapi "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
-
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
)
type downOp func() error
func (s *composeService) Down(ctx context.Context, projectName string, options api.DownOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
+ return Run(ctx, func(ctx context.Context) error {
return s.down(ctx, strings.ToLower(projectName), options)
- })
+ }, "down", s.events)
}
-func (s *composeService) down(ctx context.Context, projectName string, options api.DownOptions) error {
- w := progress.ContextWriter(ctx)
+func (s *composeService) down(ctx context.Context, projectName string, options api.DownOptions) error { //nolint:gocyclo
resourceToRemove := false
- var containers Containers
- containers, err := s.getContainers(ctx, projectName, oneOffInclude, true)
+ include := oneOffExclude
+ if options.RemoveOrphans {
+ include = oneOffInclude
+ }
+ containers, err := s.getContainers(ctx, projectName, include, true)
if err != nil {
return err
}
@@ -58,39 +62,60 @@ func (s *composeService) down(ctx context.Context, projectName string, options a
}
}
+ // Check requested services exists in model
+ services, err := checkSelectedServices(options, project)
+ if err != nil {
+ return err
+ }
+
+ if len(options.Services) > 0 && len(services) == 0 {
+ logrus.Infof("Any of the services %v not running in project %q", options.Services, projectName)
+ return nil
+ }
+
+ options.Services = services
+
if len(containers) > 0 {
resourceToRemove = true
}
err = InReverseDependencyOrder(ctx, project, func(c context.Context, service string) error {
+ serv := project.Services[service]
+ if serv.Provider != nil {
+ return s.runPlugin(ctx, project, serv, "down")
+ }
serviceContainers := containers.filter(isService(service))
- err := s.removeContainers(ctx, w, serviceContainers, options.Timeout, options.Volumes)
+ err := s.removeContainers(ctx, serviceContainers, &serv, options.Timeout, options.Volumes)
return err
- })
+ }, WithRootNodesAndDown(options.Services))
if err != nil {
return err
}
- orphans := containers.filter(isNotService(project.ServiceNames()...))
+ orphans := containers.filter(isOrphaned(project))
if options.RemoveOrphans && len(orphans) > 0 {
- err := s.removeContainers(ctx, w, orphans, options.Timeout, false)
+ err := s.removeContainers(ctx, orphans, nil, options.Timeout, false)
if err != nil {
return err
}
}
- ops := s.ensureNetworksDown(ctx, project, w)
+ ops := s.ensureNetworksDown(ctx, project)
if options.Images != "" {
- ops = append(ops, s.ensureImagesDown(ctx, project, options, w)...)
+ imgOps, err := s.ensureImagesDown(ctx, project, options)
+ if err != nil {
+ return err
+ }
+ ops = append(ops, imgOps...)
}
if options.Volumes {
- ops = append(ops, s.ensureVolumesDown(ctx, project, w)...)
+ ops = append(ops, s.ensureVolumesDown(ctx, project)...)
}
if !resourceToRemove && len(ops) == 0 {
- w.Event(progress.NewEvent(projectName, progress.Done, "Warning: No resource found to remove"))
+ logrus.Warnf("Warning: No resource found to remove for project %q.", projectName)
}
eg, _ := errgroup.WithContext(ctx)
@@ -100,145 +125,265 @@ func (s *composeService) down(ctx context.Context, projectName string, options a
return eg.Wait()
}
-func (s *composeService) ensureVolumesDown(ctx context.Context, project *types.Project, w progress.Writer) []downOp {
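+// checkSelectedServices returns the subset of the requested services that exist in the project model.
+// An unknown name is only an error when an explicit project was provided; otherwise it is assumed to
+// refer to a container that was already removed.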
+func checkSelectedServices(options api.DownOptions, project *types.Project) ([]string, error) {
+ var services []string
+ for _, service := range options.Services {
+ _, err := project.GetService(service)
+ if err != nil {
+ if options.Project != nil {
+ // ran with an explicit compose.yaml file, so we should not ignore
+ return nil, err
+ }
+ // ran without an explicit compose.yaml file, so can't distinguish typo vs container already removed
+ } else {
+ services = append(services, service)
+ }
+ }
+ return services, nil
+}
+
+func (s *composeService) ensureVolumesDown(ctx context.Context, project *types.Project) []downOp {
var ops []downOp
for _, vol := range project.Volumes {
- if vol.External.External {
+ if vol.External {
continue
}
volumeName := vol.Name
ops = append(ops, func() error {
- return s.removeVolume(ctx, volumeName, w)
+ return s.removeVolume(ctx, volumeName)
})
}
+
return ops
}
-func (s *composeService) ensureImagesDown(ctx context.Context, project *types.Project, options api.DownOptions, w progress.Writer) []downOp {
+func (s *composeService) ensureImagesDown(ctx context.Context, project *types.Project, options api.DownOptions) ([]downOp, error) {
+ imagePruner := NewImagePruner(s.apiClient(), project)
+ pruneOpts := ImagePruneOptions{
+ Mode: ImagePruneMode(options.Images),
+ RemoveOrphans: options.RemoveOrphans,
+ }
+ images, err := imagePruner.ImagesToPrune(ctx, pruneOpts)
+ if err != nil {
+ return nil, err
+ }
+
var ops []downOp
- for image := range s.getServiceImages(options, project) {
- image := image
+ for i := range images {
+ img := images[i]
ops = append(ops, func() error {
- return s.removeImage(ctx, image, w)
+ return s.removeImage(ctx, img)
})
}
- return ops
+ return ops, nil
}
-func (s *composeService) ensureNetworksDown(ctx context.Context, project *types.Project, w progress.Writer) []downOp {
+func (s *composeService) ensureNetworksDown(ctx context.Context, project *types.Project) []downOp {
var ops []downOp
- for _, n := range project.Networks {
- if n.External.External {
+ for key, n := range project.Networks {
+ if n.External {
continue
}
- networkName := n.Name
- _, err := s.apiClient().NetworkInspect(ctx, networkName, moby.NetworkInspectOptions{})
- if client.IsNotFound(err) {
- return nil
- }
-
+ // loop capture variable for op closure
+ networkKey := key
+ idOrName := n.Name
ops = append(ops, func() error {
- return s.removeNetwork(ctx, networkName, w)
+ return s.removeNetwork(ctx, networkKey, project.Name, idOrName)
})
}
return ops
}
-func (s *composeService) getServiceImages(options api.DownOptions, project *types.Project) map[string]struct{} {
- images := map[string]struct{}{}
- for _, service := range project.Services {
- image := service.Image
- if options.Images == "local" && image != "" {
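+// removeNetwork removes every network matching the compose network key and name for the project.
+// Networks that are still in use, or that disappeared in the meantime, only emit a warning event.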
+func (s *composeService) removeNetwork(ctx context.Context, composeNetworkName string, projectName string, name string) error {
+ networks, err := s.apiClient().NetworkList(ctx, network.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(projectName),
+ networkFilter(composeNetworkName)),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to list networks: %w", err)
+ }
+
+ if len(networks) == 0 {
+ return nil
+ }
+
+ eventName := fmt.Sprintf("Network %s", name)
+ s.events.On(removingEvent(eventName))
+
+ var found int
+ for _, net := range networks {
+ if net.Name != name {
+ continue
+ }
+ nw, err := s.apiClient().NetworkInspect(ctx, net.ID, network.InspectOptions{})
+ if errdefs.IsNotFound(err) {
+ s.events.On(newEvent(eventName, api.Warning, "No resource found to remove"))
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if len(nw.Containers) > 0 {
+ s.events.On(newEvent(eventName, api.Warning, "Resource is still in use"))
+ found++
continue
}
- if image == "" {
- image = getImageName(service, project.Name)
+
+ if err := s.apiClient().NetworkRemove(ctx, net.ID); err != nil {
+ if errdefs.IsNotFound(err) {
+ continue
+ }
+ s.events.On(errorEvent(eventName, err.Error()))
+ return fmt.Errorf("failed to remove network %s: %w", name, err)
}
- images[image] = struct{}{}
+ s.events.On(removedEvent(eventName))
+ found++
+ }
+
+ if found == 0 {
+ // in practice, it's extremely unlikely for this to ever occur, as it'd
+ // mean the network was present when we queried at the start of this
+ // method but was then deleted by something else in the interim
+ s.events.On(newEvent(eventName, api.Warning, "No resource found to remove"))
+ return nil
}
- return images
+ return nil
}
-func (s *composeService) removeImage(ctx context.Context, image string, w progress.Writer) error {
+func (s *composeService) removeImage(ctx context.Context, image string) error {
id := fmt.Sprintf("Image %s", image)
- w.Event(progress.NewEvent(id, progress.Working, "Removing"))
- _, err := s.apiClient().ImageRemove(ctx, image, moby.ImageRemoveOptions{})
+ s.events.On(newEvent(id, api.Working, "Removing"))
+ _, err := s.apiClient().ImageRemove(ctx, image, imageapi.RemoveOptions{})
if err == nil {
- w.Event(progress.NewEvent(id, progress.Done, "Removed"))
+ s.events.On(newEvent(id, api.Done, "Removed"))
+ return nil
+ }
+ if errdefs.IsConflict(err) {
+ s.events.On(newEvent(id, api.Warning, "Resource is still in use"))
return nil
}
if errdefs.IsNotFound(err) {
- w.Event(progress.NewEvent(id, progress.Done, "Warning: No resource found to remove"))
+ s.events.On(newEvent(id, api.Done, "Warning: No resource found to remove"))
return nil
}
return err
}
-func (s *composeService) removeVolume(ctx context.Context, id string, w progress.Writer) error {
+func (s *composeService) removeVolume(ctx context.Context, id string) error {
resource := fmt.Sprintf("Volume %s", id)
- w.Event(progress.NewEvent(resource, progress.Working, "Removing"))
- err := s.apiClient().VolumeRemove(ctx, id, true)
+
+ _, err := s.apiClient().VolumeInspect(ctx, id)
+ if errdefs.IsNotFound(err) {
+ // Already gone
+ return nil
+ }
+
+ s.events.On(newEvent(resource, api.Working, "Removing"))
+ err = s.apiClient().VolumeRemove(ctx, id, true)
if err == nil {
- w.Event(progress.NewEvent(resource, progress.Done, "Removed"))
+ s.events.On(newEvent(resource, api.Done, "Removed"))
+ return nil
+ }
+ if errdefs.IsConflict(err) {
+ s.events.On(newEvent(resource, api.Warning, "Resource is still in use"))
return nil
}
if errdefs.IsNotFound(err) {
- w.Event(progress.NewEvent(resource, progress.Done, "Warning: No resource found to remove"))
+ s.events.On(newEvent(resource, api.Done, "Warning: No resource found to remove"))
return nil
}
return err
}
-func (s *composeService) stopContainers(ctx context.Context, w progress.Writer, containers []moby.Container, timeout *time.Duration) error {
- eg, ctx := errgroup.WithContext(ctx)
- for _, container := range containers {
- container := container
- eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.StoppingEvent(eventName))
- err := s.apiClient().ContainerStop(ctx, container.ID, timeout)
+func (s *composeService) stopContainer(ctx context.Context, service *types.ServiceConfig, ctr containerType.Summary, timeout *time.Duration, listener api.ContainerEventListener) error {
+ eventName := getContainerProgressName(ctr)
+ s.events.On(stoppingEvent(eventName))
+
+ if service != nil {
+ for _, hook := range service.PreStop {
+ err := s.runHook(ctx, ctr, *service, hook, listener)
if err != nil {
- w.Event(progress.ErrorMessageEvent(eventName, "Error while Stopping"))
+ // Ignore errors indicating that some containers were already stopped or removed.
+ if errdefs.IsNotFound(err) || errdefs.IsConflict(err) {
+ return nil
+ }
return err
}
- w.Event(progress.StoppedEvent(eventName))
- return nil
+ }
+ }
+
+ timeoutInSecond := utils.DurationSecondToInt(timeout)
+ err := s.apiClient().ContainerStop(ctx, ctr.ID, containerType.StopOptions{Timeout: timeoutInSecond})
+ if err != nil {
+ s.events.On(errorEvent(eventName, "Error while Stopping"))
+ return err
+ }
+ s.events.On(stoppedEvent(eventName))
+ return nil
+}
+
+func (s *composeService) stopContainers(ctx context.Context, serv *types.ServiceConfig, containers []containerType.Summary, timeout *time.Duration, listener api.ContainerEventListener) error {
+ eg, ctx := errgroup.WithContext(ctx)
+ for _, ctr := range containers {
+ eg.Go(func() error {
+ return s.stopContainer(ctx, serv, ctr, timeout, listener)
})
}
return eg.Wait()
}
-func (s *composeService) removeContainers(ctx context.Context, w progress.Writer, containers []moby.Container, timeout *time.Duration, volumes bool) error {
+func (s *composeService) removeContainers(ctx context.Context, containers []containerType.Summary, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
eg, _ := errgroup.WithContext(ctx)
- for _, container := range containers {
- container := container
+ for _, ctr := range containers {
eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.StoppingEvent(eventName))
- err := s.stopContainers(ctx, w, []moby.Container{container}, timeout)
- if err != nil {
- w.Event(progress.ErrorMessageEvent(eventName, "Error while Stopping"))
- return err
- }
- w.Event(progress.RemovingEvent(eventName))
- err = s.apiClient().ContainerRemove(ctx, container.ID, moby.ContainerRemoveOptions{
- Force: true,
- RemoveVolumes: volumes,
- })
- if err != nil {
- w.Event(progress.ErrorMessageEvent(eventName, "Error while Removing"))
- return err
- }
- w.Event(progress.RemovedEvent(eventName))
- return nil
+ return s.stopAndRemoveContainer(ctx, ctr, service, timeout, volumes)
})
}
return eg.Wait()
}
+func (s *composeService) stopAndRemoveContainer(ctx context.Context, ctr containerType.Summary, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
+ eventName := getContainerProgressName(ctr)
+ err := s.stopContainer(ctx, service, ctr, timeout, nil)
+ if errdefs.IsNotFound(err) {
+ s.events.On(removedEvent(eventName))
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ s.events.On(removingEvent(eventName))
+ err = s.apiClient().ContainerRemove(ctx, ctr.ID, containerType.RemoveOptions{
+ Force: true,
+ RemoveVolumes: volumes,
+ })
+ if err != nil && !errdefs.IsNotFound(err) && !errdefs.IsConflict(err) {
+ s.events.On(errorEvent(eventName, "Error while Removing"))
+ return err
+ }
+ s.events.On(removedEvent(eventName))
+ return nil
+}
+
func (s *composeService) getProjectWithResources(ctx context.Context, containers Containers, projectName string) (*types.Project, error) {
containers = containers.filter(isNotOneOff)
- project, _ := s.projectFromName(containers, projectName)
+ p, err := s.projectFromName(containers, projectName)
+ if err != nil && !api.IsNotFoundError(err) {
+ return nil, err
+ }
+ project, err := p.WithServicesTransform(func(name string, service types.ServiceConfig) (types.ServiceConfig, error) {
+ for k := range service.DependsOn {
+ if dependency, ok := service.DependsOn[k]; ok {
+ dependency.Required = false
+ service.DependsOn[k] = dependency
+ }
+ }
+ return service, nil
+ })
+ if err != nil {
+ return nil, err
+ }
volumes, err := s.actualVolumes(ctx, projectName)
if err != nil {
@@ -251,5 +396,6 @@ func (s *composeService) getProjectWithResources(ctx context.Context, containers
return nil, err
}
project.Networks = networks
+
return project, nil
}
diff --git a/pkg/compose/down_test.go b/pkg/compose/down_test.go
index c799975cc15..8966617504d 100644
--- a/pkg/compose/down_test.go
+++ b/pkg/compose/down_test.go
@@ -18,51 +18,166 @@ package compose
import (
"context"
+ "fmt"
+ "os"
"strings"
"testing"
- compose "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/mocks"
- moby "github.com/docker/docker/api/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/volume"
- "github.com/golang/mock/gomock"
+ "go.uber.org/mock/gomock"
"gotest.tools/v3/assert"
+
+ compose "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/mocks"
)
func TestDown(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{
+ testContainer("service1", "123", false),
+ testContainer("service2", "456", false),
+ testContainer("service2", "789", false),
+ testContainer("service_orphan", "321", true),
+ }, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+
+ // network names are not guaranteed to be unique, ensure Compose handles
+ // cleanup properly if duplicates are inadvertently created
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {ID: "abc123", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ {ID: "def456", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ }, nil)
+
+ stopOptions := container.StopOptions{}
+ api.EXPECT().ContainerStop(gomock.Any(), "123", stopOptions).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "456", stopOptions).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "789", stopOptions).Return(nil)
+
+ api.EXPECT().ContainerRemove(gomock.Any(), "123", container.RemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "456", container.RemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "789", container.RemoveOptions{Force: true}).Return(nil)
+
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(strings.ToLower(testProject)),
+ networkFilter("default")),
+ }).Return([]network.Summary{
+ {ID: "abc123", Name: "myProject_default"},
+ {ID: "def456", Name: "myProject_default"},
+ }, nil)
+ api.EXPECT().NetworkInspect(gomock.Any(), "abc123", gomock.Any()).Return(network.Inspect{ID: "abc123"}, nil)
+ api.EXPECT().NetworkInspect(gomock.Any(), "def456", gomock.Any()).Return(network.Inspect{ID: "def456"}, nil)
+ api.EXPECT().NetworkRemove(gomock.Any(), "abc123").Return(nil)
+ api.EXPECT().NetworkRemove(gomock.Any(), "def456").Return(nil)
+
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{})
+ assert.NilError(t, err)
+}
+
+func TestDownWithGivenServices(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
- api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt()).Return(
- []moby.Container{
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{
testContainer("service1", "123", false),
testContainer("service2", "456", false),
testContainer("service2", "789", false),
testContainer("service_orphan", "321", true),
}, nil)
- api.EXPECT().VolumeList(gomock.Any(), filters.NewArgs(projectFilter(strings.ToLower(testProject)))).
- Return(volume.VolumeListOKBody{}, nil)
- api.EXPECT().NetworkList(gomock.Any(), moby.NetworkListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
- Return([]moby.NetworkResource{{Name: "myProject_default"}}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+
+ // network names are not guaranteed to be unique, ensure Compose handles
+ // cleanup properly if duplicates are inadvertently created
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {ID: "abc123", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ {ID: "def456", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ }, nil)
- api.EXPECT().ContainerStop(gomock.Any(), "123", nil).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "456", nil).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "789", nil).Return(nil)
+ stopOptions := container.StopOptions{}
+ api.EXPECT().ContainerStop(gomock.Any(), "123", stopOptions).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "123", moby.ContainerRemoveOptions{Force: true}).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "456", moby.ContainerRemoveOptions{Force: true}).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "789", moby.ContainerRemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "123", container.RemoveOptions{Force: true}).Return(nil)
- api.EXPECT().NetworkInspect(gomock.Any(), "myProject_default", moby.NetworkInspectOptions{}).Return(moby.NetworkResource{Name: "myProject_default"}, nil)
- api.EXPECT().NetworkRemove(gomock.Any(), "myProject_default").Return(nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(strings.ToLower(testProject)),
+ networkFilter("default")),
+ }).Return([]network.Summary{
+ {ID: "abc123", Name: "myProject_default"},
+ }, nil)
+ api.EXPECT().NetworkInspect(gomock.Any(), "abc123", gomock.Any()).Return(network.Inspect{ID: "abc123"}, nil)
+ api.EXPECT().NetworkRemove(gomock.Any(), "abc123").Return(nil)
- err := tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{})
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{
+ Services: []string{"service1", "not-running-service"},
+ })
+ assert.NilError(t, err)
+}
+
+func TestDownWithSpecifiedServiceButTheServicesAreNotRunning(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{
+ testContainer("service1", "123", false),
+ testContainer("service2", "456", false),
+ testContainer("service2", "789", false),
+ testContainer("service_orphan", "321", true),
+ }, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+
+ // network names are not guaranteed to be unique, ensure Compose handles
+ // cleanup properly if duplicates are inadvertently created
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {ID: "abc123", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ {ID: "def456", Name: "myProject_default", Labels: map[string]string{compose.NetworkLabel: "default"}},
+ }, nil)
+
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{
+ Services: []string{"not-running-service1", "not-running-service2"},
+ })
assert.NilError(t, err)
}
@@ -70,34 +185,49 @@ func TestDownRemoveOrphans(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
- api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt()).Return(
- []moby.Container{
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(true)).Return(
+ []container.Summary{
testContainer("service1", "123", false),
testContainer("service2", "789", false),
testContainer("service_orphan", "321", true),
}, nil)
- api.EXPECT().VolumeList(gomock.Any(), filters.NewArgs(projectFilter(strings.ToLower(testProject)))).
- Return(volume.VolumeListOKBody{}, nil)
- api.EXPECT().NetworkList(gomock.Any(), moby.NetworkListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
- Return([]moby.NetworkResource{{Name: "myProject_default"}}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {
+ Name: "myProject_default",
+ Labels: map[string]string{compose.NetworkLabel: "default"},
+ },
+ }, nil)
- api.EXPECT().ContainerStop(gomock.Any(), "123", nil).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "789", nil).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "321", nil).Return(nil)
+ stopOptions := container.StopOptions{}
+ api.EXPECT().ContainerStop(gomock.Any(), "123", stopOptions).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "789", stopOptions).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "321", stopOptions).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "123", moby.ContainerRemoveOptions{Force: true}).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "789", moby.ContainerRemoveOptions{Force: true}).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "321", moby.ContainerRemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "123", container.RemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "789", container.RemoveOptions{Force: true}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "321", container.RemoveOptions{Force: true}).Return(nil)
- api.EXPECT().NetworkInspect(gomock.Any(), "myProject_default", moby.NetworkInspectOptions{}).Return(moby.NetworkResource{Name: "myProject_default"}, nil)
- api.EXPECT().NetworkRemove(gomock.Any(), "myProject_default").Return(nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{
+ Filters: filters.NewArgs(
+ networkFilter("default"),
+ projectFilter(strings.ToLower(testProject)),
+ ),
+ }).Return([]network.Summary{{ID: "abc123", Name: "myProject_default"}}, nil)
+ api.EXPECT().NetworkInspect(gomock.Any(), "abc123", gomock.Any()).Return(network.Inspect{ID: "abc123"}, nil)
+ api.EXPECT().NetworkRemove(gomock.Any(), "abc123").Return(nil)
- err := tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{RemoveOrphans: true})
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{RemoveOrphans: true})
assert.NilError(t, err)
}
@@ -105,25 +235,186 @@ func TestDownRemoveVolumes(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
- api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt()).Return(
- []moby.Container{testContainer("service1", "123", false)}, nil)
- api.EXPECT().VolumeList(gomock.Any(), filters.NewArgs(projectFilter(strings.ToLower(testProject)))).
- Return(volume.VolumeListOKBody{
- Volumes: []*moby.Volume{{Name: "myProject_volume"}},
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{testContainer("service1", "123", false)}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{
+ Volumes: []*volume.Volume{{Name: "myProject_volume"}},
}, nil)
- api.EXPECT().NetworkList(gomock.Any(), moby.NetworkListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ api.EXPECT().VolumeInspect(gomock.Any(), "myProject_volume").
+ Return(volume.Volume{}, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
Return(nil, nil)
- api.EXPECT().ContainerStop(gomock.Any(), "123", nil).Return(nil)
- api.EXPECT().ContainerRemove(gomock.Any(), "123", moby.ContainerRemoveOptions{Force: true, RemoveVolumes: true}).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "123", container.StopOptions{}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "123", container.RemoveOptions{Force: true, RemoveVolumes: true}).Return(nil)
api.EXPECT().VolumeRemove(gomock.Any(), "myProject_volume", true).Return(nil)
- err := tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{Volumes: true})
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{Volumes: true})
+ assert.NilError(t, err)
+}
+
+func TestDownRemoveImages(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ opts := compose.DownOptions{
+ Project: &types.Project{
+ Name: strings.ToLower(testProject),
+ Services: types.Services{
+ "local-anonymous": {Name: "local-anonymous"},
+ "local-named": {Name: "local-named", Image: "local-named-image"},
+ "remote": {Name: "remote", Image: "remote-image"},
+ "remote-tagged": {Name: "remote-tagged", Image: "registry.example.com/remote-image-tagged:v1.0"},
+ "no-images-anonymous": {Name: "no-images-anonymous"},
+ "no-images-named": {Name: "no-images-named", Image: "missing-named-image"},
+ },
+ },
+ }
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).
+ Return([]container.Summary{
+ testContainer("service1", "123", false),
+ }, nil).
+ AnyTimes()
+
+ api.EXPECT().ImageList(gomock.Any(), image.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(strings.ToLower(testProject)),
+ filters.Arg("dangling", "false"),
+ ),
+ }).Return([]image.Summary{
+ {
+ Labels: types.Labels{compose.ServiceLabel: "local-anonymous"},
+ RepoTags: []string{"testproject-local-anonymous:latest"},
+ },
+ {
+ Labels: types.Labels{compose.ServiceLabel: "local-named"},
+ RepoTags: []string{"local-named-image:latest"},
+ },
+ }, nil).AnyTimes()
+
+ imagesToBeInspected := map[string]bool{
+ "testproject-local-anonymous": true,
+ "local-named-image": true,
+ "remote-image": true,
+ "testproject-no-images-anonymous": false,
+ "missing-named-image": false,
+ }
+ for img, exists := range imagesToBeInspected {
+ var resp image.InspectResponse
+ var err error
+ if exists {
+ resp.RepoTags = []string{img}
+ } else {
+ err = errdefs.ErrNotFound.WithMessage(fmt.Sprintf("test specified that image %q should not exist", img))
+ }
+
+ api.EXPECT().ImageInspect(gomock.Any(), img).
+ Return(resp, err).
+ AnyTimes()
+ }
+
+ api.EXPECT().ImageInspect(gomock.Any(), "registry.example.com/remote-image-tagged:v1.0").
+ Return(image.InspectResponse{RepoTags: []string{"registry.example.com/remote-image-tagged:v1.0"}}, nil).
+ AnyTimes()
+
+ localImagesToBeRemoved := []string{
+ "testproject-local-anonymous:latest",
+ "local-named-image:latest",
+ }
+ for _, img := range localImagesToBeRemoved {
+ // the test calls down --rmi=local and then down --rmi=all, so local
+ // images get "removed" twice, while the other images are removed only once
+ api.EXPECT().ImageRemove(gomock.Any(), img, image.RemoveOptions{}).
+ Return(nil, nil).
+ Times(2)
+ }
+
+ t.Log("-> docker compose down --rmi=local")
+ opts.Images = "local"
+ err = tested.Down(context.Background(), strings.ToLower(testProject), opts)
+ assert.NilError(t, err)
+
+ otherImagesToBeRemoved := []string{
+ "remote-image:latest",
+ "registry.example.com/remote-image-tagged:v1.0",
+ }
+ for _, img := range otherImagesToBeRemoved {
+ api.EXPECT().ImageRemove(gomock.Any(), img, image.RemoveOptions{}).
+ Return(nil, nil).
+ Times(1)
+ }
+
+ t.Log("-> docker compose down --rmi=all")
+ opts.Images = "all"
+ err = tested.Down(context.Background(), strings.ToLower(testProject), opts)
+ assert.NilError(t, err)
+}
+
+func TestDownRemoveImages_NoLabel(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
assert.NilError(t, err)
+
+ ctr := testContainer("service1", "123", false)
+
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{ctr}, nil)
+
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{
+ Volumes: []*volume.Volume{{Name: "myProject_volume"}},
+ }, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return(nil, nil)
+
+ // ImageList returns no images for the project since they were unlabeled
+ // (created by an older version of Compose)
+ api.EXPECT().ImageList(gomock.Any(), image.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(strings.ToLower(testProject)),
+ filters.Arg("dangling", "false"),
+ ),
+ }).Return(nil, nil)
+
+ api.EXPECT().ImageInspect(gomock.Any(), "testproject-service1").
+ Return(image.InspectResponse{}, nil)
+
+ api.EXPECT().ContainerStop(gomock.Any(), "123", container.StopOptions{}).Return(nil)
+ api.EXPECT().ContainerRemove(gomock.Any(), "123", container.RemoveOptions{Force: true}).Return(nil)
+
+ api.EXPECT().ImageRemove(gomock.Any(), "testproject-service1:latest", image.RemoveOptions{}).Return(nil, nil)
+
+ err = tested.Down(context.Background(), strings.ToLower(testProject), compose.DownOptions{Images: "local"})
+ assert.NilError(t, err)
+}
+
+func prepareMocks(mockCtrl *gomock.Controller) (*mocks.MockAPIClient, *mocks.MockCli) {
+ api := mocks.NewMockAPIClient(mockCtrl)
+ cli := mocks.NewMockCli(mockCtrl)
+ cli.EXPECT().Client().Return(api).AnyTimes()
+ cli.EXPECT().Err().Return(streams.NewOut(os.Stderr)).AnyTimes()
+ cli.EXPECT().Out().Return(streams.NewOut(os.Stdout)).AnyTimes()
+ return api, cli
}
diff --git a/pkg/compose/envresolver.go b/pkg/compose/envresolver.go
new file mode 100644
index 00000000000..a86d9351919
--- /dev/null
+++ b/pkg/compose/envresolver.go
@@ -0,0 +1,66 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "runtime"
+ "strings"
+)
+
+// isCaseInsensitiveEnvVars is true on platforms where environment variable names are treated case-insensitively.
+var isCaseInsensitiveEnvVars = (runtime.GOOS == "windows")
+
+// envResolver returns a resolver for environment variables suitable for the current platform.
+// Expected to be used with `MappingWithEquals.Resolve`.
+// Later updates to `environment` may not be reflected by the returned resolver.
+func envResolver(environment map[string]string) func(string) (string, bool) {
+ return envResolverWithCase(environment, isCaseInsensitiveEnvVars)
+}
+
+// envResolverWithCase returns a resolver for environment variables with the specified case sensitivity.
+// Expected to be used with `MappingWithEquals.Resolve`.
+// Later updates to `environment` may not be reflected by the returned resolver.
+func envResolverWithCase(environment map[string]string, caseInsensitive bool) func(string) (string, bool) {
+ if environment == nil {
+ return func(s string) (string, bool) {
+ return "", false
+ }
+ }
+ if !caseInsensitive {
+ return func(s string) (string, bool) {
+ v, ok := environment[s]
+ return v, ok
+ }
+ }
+ // Variable names must be treated case-insensitively.
+ // Resolution works as follows:
+ // * Return the value whose name matches the requested name case-sensitively.
+ // * Otherwise, return the value whose lower-cased name matches the lower-cased requested name.
+ // * The result is indeterminate if multiple variables match this way.
+ loweredEnvironment := make(map[string]string, len(environment))
+ for k, v := range environment {
+ loweredEnvironment[strings.ToLower(k)] = v
+ }
+ return func(s string) (string, bool) {
+ v, ok := environment[s]
+ if ok {
+ return v, ok
+ }
+ v, ok = loweredEnvironment[strings.ToLower(s)]
+ return v, ok
+ }
+}
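
As a rough illustration of the lookup order envResolverWithCase implements on case-insensitive platforms (exact match first, then a lower-cased fallback), here is a minimal standalone sketch; the resolve helper and the sample map are illustrative only and not part of this patch:

package main

import (
	"fmt"
	"strings"
)

// resolve mimics the two-step lookup: exact name first, then a
// case-insensitive fallback via a lower-cased copy of the map.
func resolve(env map[string]string, caseInsensitive bool, name string) (string, bool) {
	if v, ok := env[name]; ok {
		return v, true
	}
	if !caseInsensitive {
		return "", false
	}
	lowered := make(map[string]string, len(env))
	for k, v := range env {
		lowered[strings.ToLower(k)] = v
	}
	v, ok := lowered[strings.ToLower(name)]
	return v, ok
}

func main() {
	env := map[string]string{"Path": `C:\Windows\system32`}
	fmt.Println(resolve(env, true, "PATH"))  // C:\Windows\system32 true
	fmt.Println(resolve(env, false, "PATH")) // "" false
}
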
diff --git a/pkg/compose/envresolver_test.go b/pkg/compose/envresolver_test.go
new file mode 100644
index 00000000000..fca5f719186
--- /dev/null
+++ b/pkg/compose/envresolver_test.go
@@ -0,0 +1,115 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func Test_EnvResolverWithCase(t *testing.T) {
+ tests := []struct {
+ name string
+ environment map[string]string
+ caseInsensitive bool
+ search string
+ expectedValue string
+ expectedOk bool
+ }{
+ {
+ name: "case sensitive/case match",
+ environment: map[string]string{
+ "Env1": "Value1",
+ "Env2": "Value2",
+ },
+ caseInsensitive: false,
+ search: "Env1",
+ expectedValue: "Value1",
+ expectedOk: true,
+ },
+ {
+ name: "case sensitive/case mismatch",
+ environment: map[string]string{
+ "Env1": "Value1",
+ "Env2": "Value2",
+ },
+ caseInsensitive: false,
+ search: "ENV1",
+ expectedValue: "",
+ expectedOk: false,
+ },
+ {
+ name: "case sensitive/nil environment",
+ environment: nil,
+ caseInsensitive: false,
+ search: "Env1",
+ expectedValue: "",
+ expectedOk: false,
+ },
+ {
+ name: "case insensitive/case match",
+ environment: map[string]string{
+ "Env1": "Value1",
+ "Env2": "Value2",
+ },
+ caseInsensitive: true,
+ search: "Env1",
+ expectedValue: "Value1",
+ expectedOk: true,
+ },
+ {
+ name: "case insensitive/case mismatch",
+ environment: map[string]string{
+ "Env1": "Value1",
+ "Env2": "Value2",
+ },
+ caseInsensitive: true,
+ search: "ENV1",
+ expectedValue: "Value1",
+ expectedOk: true,
+ },
+ {
+ name: "case insensitive/no match",
+ environment: map[string]string{
+ "Env1": "Value1",
+ "Env2": "Value2",
+ },
+ caseInsensitive: true,
+ search: "Env3",
+ expectedValue: "",
+ expectedOk: false,
+ },
+ {
+ name: "case insensitive/nil environment",
+ environment: nil,
+ caseInsensitive: true,
+ search: "Env1",
+ expectedValue: "",
+ expectedOk: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ f := envResolverWithCase(test.environment, test.caseInsensitive)
+ v, ok := f(test.search)
+ assert.Equal(t, v, test.expectedValue)
+ assert.Equal(t, ok, test.expectedOk)
+ })
+ }
+}
diff --git a/pkg/compose/errors.go b/pkg/compose/errors.go
deleted file mode 100644
index d97fe713d3c..00000000000
--- a/pkg/compose/errors.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package compose
-
-import (
- "io/fs"
-
- "github.com/pkg/errors"
-
- "github.com/compose-spec/compose-go/errdefs"
-)
-
-// Error error to categorize failures and extract metrics info
-type Error struct {
- Err error
- Category *FailureCategory
-}
-
-// WrapComposeError wraps the error if not nil, otherwise returns nil
-func WrapComposeError(err error) error {
- if err == nil {
- return nil
- }
- return Error{
- Err: err,
- }
-}
-
-// WrapCategorisedComposeError wraps the error if not nil, otherwise returns nil
-func WrapCategorisedComposeError(err error, failure FailureCategory) error {
- if err == nil {
- return nil
- }
- return Error{
- Err: err,
- Category: &failure,
- }
-}
-
-// Unwrap get underlying error
-func (e Error) Unwrap() error { return e.Err }
-
-func (e Error) Error() string { return e.Err.Error() }
-
-// GetMetricsFailureCategory get metrics status and error code corresponding to this error
-func (e Error) GetMetricsFailureCategory() FailureCategory {
- if e.Category != nil {
- return *e.Category
- }
- var pathError *fs.PathError
- if errors.As(e.Err, &pathError) {
- return FileNotFoundFailure
- }
- if errdefs.IsNotFoundError(e.Err) {
- return FileNotFoundFailure
- }
- return ComposeParseFailure
-}
diff --git a/pkg/compose/events.go b/pkg/compose/events.go
index 4d04aaa737d..af75bd7c530 100644
--- a/pkg/compose/events.go
+++ b/pkg/compose/events.go
@@ -18,24 +18,26 @@ package compose
import (
"context"
+ "slices"
"strings"
"time"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
- "github.com/docker/compose/v2/pkg/api"
-
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Events(ctx context.Context, project string, options api.EventsOptions) error {
- events, errors := s.apiClient().Events(ctx, moby.EventsOptions{
- Filters: filters.NewArgs(projectFilter(project)),
+func (s *composeService) Events(ctx context.Context, projectName string, options api.EventsOptions) error {
+ projectName = strings.ToLower(projectName)
+ evts, errors := s.apiClient().Events(ctx, events.ListOptions{
+ Filters: filters.NewArgs(projectFilter(projectName)),
+ Since: options.Since,
+ Until: options.Until,
})
for {
select {
- case event := <-events:
+ case event := <-evts:
// TODO: support other event types
if event.Type != "container" {
continue
@@ -47,7 +49,7 @@ func (s *composeService) Events(ctx context.Context, project string, options api
continue
}
service := event.Actor.Attributes[api.ServiceLabel]
- if len(options.Services) > 0 && !utils.StringContains(options.Services, service) {
+ if len(options.Services) > 0 && !slices.Contains(options.Services, service) {
continue
}
@@ -66,8 +68,8 @@ func (s *composeService) Events(ctx context.Context, project string, options api
err := options.Consumer(api.Event{
Timestamp: timestamp,
Service: service,
- Container: event.ID,
- Status: event.Status,
+ Container: event.Actor.ID,
+ Status: string(event.Action),
Attributes: attributes,
})
if err != nil {
diff --git a/pkg/compose/exec.go b/pkg/compose/exec.go
index cbc6c0a11e4..b1f4666f33b 100644
--- a/pkg/compose/exec.go
+++ b/pkg/compose/exec.go
@@ -18,58 +18,45 @@ package compose
import (
"context"
- "fmt"
+ "errors"
+ "strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command/container"
- "github.com/docker/compose/v2/pkg/api"
- moby "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
+ "github.com/docker/compose/v5/pkg/api"
+ containerType "github.com/docker/docker/api/types/container"
)
-func (s *composeService) Exec(ctx context.Context, project string, opts api.RunOptions) (int, error) {
- target, err := s.getExecTarget(ctx, project, opts)
+func (s *composeService) Exec(ctx context.Context, projectName string, options api.RunOptions) (int, error) {
+ projectName = strings.ToLower(projectName)
+ target, err := s.getExecTarget(ctx, projectName, options)
if err != nil {
return 0, err
}
exec := container.NewExecOptions()
- exec.Interactive = opts.Interactive
- exec.TTY = opts.Tty
- exec.Detach = opts.Detach
- exec.User = opts.User
- exec.Privileged = opts.Privileged
- exec.Workdir = opts.WorkingDir
- exec.Container = target.ID
- exec.Command = opts.Command
- for _, v := range opts.Environment {
+ exec.Interactive = options.Interactive
+ exec.TTY = options.Tty
+ exec.Detach = options.Detach
+ exec.User = options.User
+ exec.Privileged = options.Privileged
+ exec.Workdir = options.WorkingDir
+ exec.Command = options.Command
+ for _, v := range options.Environment {
err := exec.Env.Set(v)
if err != nil {
return 0, err
}
}
- err = container.RunExec(s.dockerCli, exec)
- if sterr, ok := err.(cli.StatusError); ok {
- return sterr.StatusCode, nil
+ err = container.RunExec(ctx, s.dockerCli, target.ID, exec)
+ var sterr cli.StatusError
+ if errors.As(err, &sterr) {
+ return sterr.StatusCode, err
}
return 0, err
}
-func (s *composeService) getExecTarget(ctx context.Context, projectName string, opts api.RunOptions) (moby.Container, error) {
- containers, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(
- projectFilter(projectName),
- serviceFilter(opts.Service),
- containerNumberFilter(opts.Index),
- ),
- })
- if err != nil {
- return moby.Container{}, err
- }
- if len(containers) < 1 {
- return moby.Container{}, fmt.Errorf("service %q is not running container #%d", opts.Service, opts.Index)
- }
- container := containers[0]
- return container, nil
+func (s *composeService) getExecTarget(ctx context.Context, projectName string, opts api.RunOptions) (containerType.Summary, error) {
+ return s.getSpecifiedContainer(ctx, projectName, oneOffInclude, false, opts.Service, opts.Index)
}
diff --git a/pkg/compose/export.go b/pkg/compose/export.go
new file mode 100644
index 00000000000..65dd31cd1b3
--- /dev/null
+++ b/pkg/compose/export.go
@@ -0,0 +1,93 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/moby/sys/atomicwriter"
+)
+
+func (s *composeService) Export(ctx context.Context, projectName string, options api.ExportOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.export(ctx, projectName, options)
+ }, "export", s.events)
+}
+
+func (s *composeService) export(ctx context.Context, projectName string, options api.ExportOptions) error {
+ projectName = strings.ToLower(projectName)
+
+ container, err := s.getSpecifiedContainer(ctx, projectName, oneOffInclude, false, options.Service, options.Index)
+ if err != nil {
+ return err
+ }
+
+ if options.Output == "" {
+ if s.stdout().IsTerminal() {
+ return fmt.Errorf("output option is required when exporting to terminal")
+ }
+ } else if err := command.ValidateOutputPath(options.Output); err != nil {
+ return fmt.Errorf("failed to export container: %w", err)
+ }
+
+ name := getCanonicalContainerName(container)
+ s.events.On(api.Resource{
+ ID: name,
+ Text: api.StatusExporting,
+ Status: api.Working,
+ })
+
+ responseBody, err := s.apiClient().ContainerExport(ctx, container.ID)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ if err := responseBody.Close(); err != nil {
+ s.events.On(errorEventf(name, "Failed to close response body: %s", err.Error()))
+ }
+ }()
+
+ if !s.dryRun {
+ if options.Output == "" {
+ _, err := io.Copy(s.stdout(), responseBody)
+ return err
+ } else {
+ writer, err := atomicwriter.New(options.Output, 0o600)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = writer.Close() }()
+
+ _, err = io.Copy(writer, responseBody)
+ return err
+ }
+ }
+
+ s.events.On(api.Resource{
+ ID: name,
+ Text: api.StatusExported,
+ Status: api.Done,
+ })
+
+ return nil
+}
diff --git a/pkg/compose/filters.go b/pkg/compose/filters.go
index 349c6d0e5ce..794803160d3 100644
--- a/pkg/compose/filters.go
+++ b/pkg/compose/filters.go
@@ -19,7 +19,7 @@ package compose
import (
"fmt"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
"github.com/docker/docker/api/types/filters"
)
@@ -31,6 +31,10 @@ func serviceFilter(serviceName string) filters.KeyValuePair {
return filters.Arg("label", fmt.Sprintf("%s=%s", api.ServiceLabel, serviceName))
}
+func networkFilter(name string) filters.KeyValuePair {
+ return filters.Arg("label", fmt.Sprintf("%s=%s", api.NetworkLabel, name))
+}
+
func oneOffFilter(b bool) filters.KeyValuePair {
v := "False"
if b {
@@ -46,3 +50,7 @@ func containerNumberFilter(index int) filters.KeyValuePair {
func hasProjectLabelFilter() filters.KeyValuePair {
return filters.Arg("label", api.ProjectLabel)
}
+
+func hasConfigHashLabel() filters.KeyValuePair {
+ return filters.Arg("label", api.ConfigHashLabel)
+}
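
These helpers all assemble `label` filters from the well-known Compose labels defined in pkg/api. A small sketch of what a call like projectFilter("myapp") plus networkFilter("backend") ends up producing, with the label keys written out literally for illustration (the exact constant values are an assumption here, as they are not shown in this hunk):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Roughly the filter arguments built by projectFilter("myapp") and
	// networkFilter("backend"), with assumed label key values spelled out.
	args := filters.NewArgs(
		filters.Arg("label", "com.docker.compose.project=myapp"),
		filters.Arg("label", "com.docker.compose.network=backend"),
	)
	fmt.Println(args.Get("label")) // both label selectors (order not guaranteed)
}
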
diff --git a/pkg/compose/generate.go b/pkg/compose/generate.go
new file mode 100644
index 00000000000..07999fd4cef
--- /dev/null
+++ b/pkg/compose/generate.go
@@ -0,0 +1,247 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "maps"
+ "slices"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+)
+
+func (s *composeService) Generate(ctx context.Context, options api.GenerateOptions) (*types.Project, error) {
+ filtersListNames := filters.NewArgs()
+ filtersListIDs := filters.NewArgs()
+ for _, containerName := range options.Containers {
+ filtersListNames.Add("name", containerName)
+ filtersListIDs.Add("id", containerName)
+ }
+ containers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filtersListNames,
+ All: true,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ containersByIds, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filtersListIDs,
+ All: true,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ for _, ctr := range containersByIds {
+ if !slices.ContainsFunc(containers, func(summary container.Summary) bool {
+ return summary.ID == ctr.ID
+ }) {
+ containers = append(containers, ctr)
+ }
+ }
+
+ if len(containers) == 0 {
+ return nil, fmt.Errorf("no container(s) found with the following name(s): %s", strings.Join(options.Containers, ","))
+ }
+
+ return s.createProjectFromContainers(containers, options.ProjectName)
+}
+
+func (s *composeService) createProjectFromContainers(containers []container.Summary, projectName string) (*types.Project, error) {
+ project := &types.Project{}
+ services := types.Services{}
+ networks := types.Networks{}
+ volumes := types.Volumes{}
+ secrets := types.Secrets{}
+
+ if projectName != "" {
+ project.Name = projectName
+ }
+
+ for _, c := range containers {
+ // if the container is from a previous Compose application, use the existing service name
+ serviceLabel, ok := c.Labels[api.ServiceLabel]
+ if !ok {
+ serviceLabel = getCanonicalContainerName(c)
+ }
+ service, ok := services[serviceLabel]
+ if !ok {
+ service = types.ServiceConfig{
+ Name: serviceLabel,
+ Image: c.Image,
+ Labels: c.Labels,
+ }
+ }
+ service.Scale = increment(service.Scale)
+
+ inspect, err := s.apiClient().ContainerInspect(context.Background(), c.ID)
+ if err != nil {
+ services[serviceLabel] = service
+ continue
+ }
+ s.extractComposeConfiguration(&service, inspect, volumes, secrets, networks)
+ service.Labels = cleanDockerPreviousLabels(service.Labels)
+ services[serviceLabel] = service
+ }
+
+ project.Services = services
+ project.Networks = networks
+ project.Volumes = volumes
+ project.Secrets = secrets
+ return project, nil
+}
+
+func (s *composeService) extractComposeConfiguration(service *types.ServiceConfig, inspect container.InspectResponse, volumes types.Volumes, secrets types.Secrets, networks types.Networks) {
+ service.Environment = types.NewMappingWithEquals(inspect.Config.Env)
+ if inspect.Config.Healthcheck != nil {
+ healthConfig := inspect.Config.Healthcheck
+ service.HealthCheck = s.toComposeHealthCheck(healthConfig)
+ }
+ if len(inspect.Mounts) > 0 {
+ detectedVolumes, volumeConfigs, detectedSecrets, secretsConfigs := s.toComposeVolumes(inspect.Mounts)
+ service.Volumes = append(service.Volumes, volumeConfigs...)
+ service.Secrets = append(service.Secrets, secretsConfigs...)
+ maps.Copy(volumes, detectedVolumes)
+ maps.Copy(secrets, detectedSecrets)
+ }
+ if len(inspect.NetworkSettings.Networks) > 0 {
+ detectedNetworks, networkConfigs := s.toComposeNetwork(inspect.NetworkSettings.Networks)
+ service.Networks = networkConfigs
+ maps.Copy(networks, detectedNetworks)
+ }
+ if len(inspect.HostConfig.PortBindings) > 0 {
+ for key, portBindings := range inspect.HostConfig.PortBindings {
+ for _, portBinding := range portBindings {
+ service.Ports = append(service.Ports, types.ServicePortConfig{
+ Target: uint32(key.Int()),
+ Published: portBinding.HostPort,
+ Protocol: key.Proto(),
+ HostIP: portBinding.HostIP,
+ })
+ }
+ }
+ }
+}
+
+func (s *composeService) toComposeHealthCheck(healthConfig *container.HealthConfig) *types.HealthCheckConfig {
+ var healthCheck types.HealthCheckConfig
+ healthCheck.Test = healthConfig.Test
+ if healthConfig.Timeout != 0 {
+ timeout := types.Duration(healthConfig.Timeout)
+ healthCheck.Timeout = &timeout
+ }
+ if healthConfig.Interval != 0 {
+ interval := types.Duration(healthConfig.Interval)
+ healthCheck.Interval = &interval
+ }
+ if healthConfig.StartPeriod != 0 {
+ startPeriod := types.Duration(healthConfig.StartPeriod)
+ healthCheck.StartPeriod = &startPeriod
+ }
+ if healthConfig.StartInterval != 0 {
+ startInterval := types.Duration(healthConfig.StartInterval)
+ healthCheck.StartInterval = &startInterval
+ }
+ if healthConfig.Retries != 0 {
+ retries := uint64(healthConfig.Retries)
+ healthCheck.Retries = &retries
+ }
+ return &healthCheck
+}
+
+func (s *composeService) toComposeVolumes(volumes []container.MountPoint) (map[string]types.VolumeConfig,
+ []types.ServiceVolumeConfig, map[string]types.SecretConfig, []types.ServiceSecretConfig,
+) {
+ volumeConfigs := make(map[string]types.VolumeConfig)
+ secretConfigs := make(map[string]types.SecretConfig)
+ var serviceVolumeConfigs []types.ServiceVolumeConfig
+ var serviceSecretConfigs []types.ServiceSecretConfig
+
+ for _, volume := range volumes {
+ serviceVC := types.ServiceVolumeConfig{
+ Type: string(volume.Type),
+ Source: volume.Source,
+ Target: volume.Destination,
+ ReadOnly: !volume.RW,
+ }
+ switch volume.Type {
+ case mount.TypeVolume:
+ serviceVC.Source = volume.Name
+ vol := types.VolumeConfig{}
+ if volume.Driver != "local" {
+ vol.Driver = volume.Driver
+ vol.Name = volume.Name
+ }
+ volumeConfigs[volume.Name] = vol
+ serviceVolumeConfigs = append(serviceVolumeConfigs, serviceVC)
+ case mount.TypeBind:
+ if strings.HasPrefix(volume.Destination, "/run/secrets") {
+ destination := strings.Split(volume.Destination, "/")
+ secret := types.SecretConfig{
+ Name: destination[len(destination)-1],
+ File: strings.TrimPrefix(volume.Source, "/host_mnt"),
+ }
+ secretConfigs[secret.Name] = secret
+ serviceSecretConfigs = append(serviceSecretConfigs, types.ServiceSecretConfig{
+ Source: secret.Name,
+ Target: volume.Destination,
+ })
+ } else {
+ serviceVolumeConfigs = append(serviceVolumeConfigs, serviceVC)
+ }
+ }
+ }
+ return volumeConfigs, serviceVolumeConfigs, secretConfigs, serviceSecretConfigs
+}
+
+func (s *composeService) toComposeNetwork(networks map[string]*network.EndpointSettings) (map[string]types.NetworkConfig, map[string]*types.ServiceNetworkConfig) {
+ networkConfigs := make(map[string]types.NetworkConfig)
+ serviceNetworkConfigs := make(map[string]*types.ServiceNetworkConfig)
+
+ for name, net := range networks {
+ inspect, err := s.apiClient().NetworkInspect(context.Background(), name, network.InspectOptions{})
+ if err != nil {
+ networkConfigs[name] = types.NetworkConfig{}
+ } else {
+ networkConfigs[name] = types.NetworkConfig{
+ Internal: inspect.Internal,
+ }
+ }
+ serviceNetworkConfigs[name] = &types.ServiceNetworkConfig{
+ Aliases: net.Aliases,
+ }
+ }
+ return networkConfigs, serviceNetworkConfigs
+}
+
+func cleanDockerPreviousLabels(labels types.Labels) types.Labels {
+ cleanedLabels := types.Labels{}
+ for key, value := range labels {
+ if !strings.HasPrefix(key, "com.docker.compose.") && !strings.HasPrefix(key, "desktop.docker.io") {
+ cleanedLabels[key] = value
+ }
+ }
+ return cleanedLabels
+}
diff --git a/pkg/compose/hash.go b/pkg/compose/hash.go
index e3f2aa0e337..24c6cce2e88 100644
--- a/pkg/compose/hash.go
+++ b/pkg/compose/hash.go
@@ -19,16 +19,43 @@ package compose
import (
"encoding/json"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"github.com/opencontainers/go-digest"
)
-// ServiceHash compute configuration has for a service
+// ServiceHash computes the configuration hash for a service.
func ServiceHash(o types.ServiceConfig) (string, error) {
// remove the Build config when generating the service hash
o.Build = nil
o.PullPolicy = ""
- o.Scale = 1
+ o.Scale = nil
+ if o.Deploy != nil {
+ o.Deploy.Replicas = nil
+ }
+ o.DependsOn = nil
+ o.Profiles = nil
+
+ bytes, err := json.Marshal(o)
+ if err != nil {
+ return "", err
+ }
+ return digest.SHA256.FromBytes(bytes).Encoded(), nil
+}
+
+// NetworkHash computes the configuration hash for a network.
+func NetworkHash(o *types.NetworkConfig) (string, error) {
+ bytes, err := json.Marshal(o)
+ if err != nil {
+ return "", err
+ }
+ return digest.SHA256.FromBytes(bytes).Encoded(), nil
+}
+
+// VolumeHash computes the configuration hash for a volume.
+func VolumeHash(o types.VolumeConfig) (string, error) {
+ if o.Driver == "" { // (TODO: jhrotko) This probably should be fixed in compose-go
+ o.Driver = "local"
+ }
bytes, err := json.Marshal(o)
if err != nil {
return "", err
diff --git a/pkg/compose/hash_test.go b/pkg/compose/hash_test.go
new file mode 100644
index 00000000000..73b7f387735
--- /dev/null
+++ b/pkg/compose/hash_test.go
@@ -0,0 +1,43 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "gotest.tools/v3/assert"
+)
+
+func TestServiceHash(t *testing.T) {
+ hash1, err := ServiceHash(serviceConfig(1))
+ assert.NilError(t, err)
+ hash2, err := ServiceHash(serviceConfig(2))
+ assert.NilError(t, err)
+ assert.Equal(t, hash1, hash2)
+}
+
+func serviceConfig(replicas int) types.ServiceConfig {
+ return types.ServiceConfig{
+ Scale: &replicas,
+ Deploy: &types.DeployConfig{
+ Replicas: &replicas,
+ },
+ Name: "foo",
+ Image: "bar",
+ }
+}
diff --git a/pkg/compose/hook.go b/pkg/compose/hook.go
new file mode 100644
index 00000000000..57c961c0630
--- /dev/null
+++ b/pkg/compose/hook.go
@@ -0,0 +1,120 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+func (s composeService) runHook(ctx context.Context, ctr container.Summary, service types.ServiceConfig, hook types.ServiceHook, listener api.ContainerEventListener) error {
+ wOut := utils.GetWriter(func(line string) {
+ listener(api.ContainerEvent{
+ Type: api.HookEventLog,
+ Source: getContainerNameWithoutProject(ctr) + " ->",
+ ID: ctr.ID,
+ Service: service.Name,
+ Line: line,
+ })
+ })
+ defer wOut.Close() //nolint:errcheck
+
+ detached := listener == nil
+ exec, err := s.apiClient().ContainerExecCreate(ctx, ctr.ID, container.ExecOptions{
+ User: hook.User,
+ Privileged: hook.Privileged,
+ Env: ToMobyEnv(hook.Environment),
+ WorkingDir: hook.WorkingDir,
+ Cmd: hook.Command,
+ AttachStdout: !detached,
+ AttachStderr: !detached,
+ })
+ if err != nil {
+ return err
+ }
+
+ if detached {
+ return s.runWaitExec(ctx, exec.ID, service, listener)
+ }
+
+ height, width := s.stdout().GetTtySize()
+ consoleSize := &[2]uint{height, width}
+ attach, err := s.apiClient().ContainerExecAttach(ctx, exec.ID, container.ExecAttachOptions{
+ Tty: service.Tty,
+ ConsoleSize: consoleSize,
+ })
+ if err != nil {
+ return err
+ }
+ defer attach.Close()
+
+ if service.Tty {
+ _, err = io.Copy(wOut, attach.Reader)
+ } else {
+ _, err = stdcopy.StdCopy(wOut, wOut, attach.Reader)
+ }
+ if err != nil {
+ return err
+ }
+
+ inspected, err := s.apiClient().ContainerExecInspect(ctx, exec.ID)
+ if err != nil {
+ return err
+ }
+ if inspected.ExitCode != 0 {
+ return fmt.Errorf("%s hook exited with status %d", service.Name, inspected.ExitCode)
+ }
+ return nil
+}
+
+func (s composeService) runWaitExec(ctx context.Context, execID string, service types.ServiceConfig, listener api.ContainerEventListener) error {
+ err := s.apiClient().ContainerExecStart(ctx, execID, container.ExecStartOptions{
+ Detach: listener == nil,
+ Tty: service.Tty,
+ })
+ if err != nil {
+ return err
+ }
+
+ // There is no ContainerExecWait API, so poll the exec state instead
+ tick := time.NewTicker(100 * time.Millisecond)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-tick.C:
+ inspect, err := s.apiClient().ContainerExecInspect(ctx, execID)
+ if err != nil {
+ return nil
+ }
+ if !inspect.Running {
+ if inspect.ExitCode != 0 {
+ return fmt.Errorf("%s hook exited with status %d", service.Name, inspect.ExitCode)
+ }
+ return nil
+ }
+ }
+ }
+}
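
Since the Engine API offers no ContainerExecWait, runWaitExec falls back to polling ContainerExecInspect on a ticker. The generic shape of that loop, as a self-contained sketch (pollUntilDone and the probe closure are illustrative names, not part of the patch; unlike the version above, this sketch surfaces the context error on cancellation):

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntilDone checks a condition on a fixed ticker until it reports
// completion, returns an error, or the context is cancelled. The probe is
// a stand-in for ContainerExecInspect.
func pollUntilDone(ctx context.Context, interval time.Duration, probe func(context.Context) (bool, error)) error {
	tick := time.NewTicker(interval)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tick.C:
			done, err := probe(ctx)
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	// The probe always reports "still running"; the context deadline ends the loop.
	err := pollUntilDone(ctx, 100*time.Millisecond, func(context.Context) (bool, error) {
		return false, nil
	})
	fmt.Println(err) // context deadline exceeded
}
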
diff --git a/pkg/compose/image_pruner.go b/pkg/compose/image_pruner.go
new file mode 100644
index 00000000000..6e09d901442
--- /dev/null
+++ b/pkg/compose/image_pruner.go
@@ -0,0 +1,247 @@
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/client"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+// ImagePruneMode controls how aggressively images associated with the project
+// are removed from the engine.
+type ImagePruneMode string
+
+const (
+ // ImagePruneNone indicates that no project images should be removed.
+ ImagePruneNone ImagePruneMode = ""
+ // ImagePruneLocal indicates that only images built locally by Compose
+ // should be removed.
+ ImagePruneLocal ImagePruneMode = "local"
+ // ImagePruneAll indicates that all project-associated images, including
+ // remote images, should be removed.
+ ImagePruneAll ImagePruneMode = "all"
+)
+
+// ImagePruneOptions controls the behavior of image pruning.
+type ImagePruneOptions struct {
+ Mode ImagePruneMode
+
+ // RemoveOrphans, if true, results in the removal of images that were built
+ // for the project regardless of whether they belong to a known service.
+ RemoveOrphans bool
+}
+
+// ImagePruner handles image removal during Compose `down` operations.
+type ImagePruner struct {
+ client client.ImageAPIClient
+ project *types.Project
+}
+
+// NewImagePruner creates an ImagePruner object for a project.
+func NewImagePruner(imageClient client.ImageAPIClient, project *types.Project) *ImagePruner {
+ return &ImagePruner{
+ client: imageClient,
+ project: project,
+ }
+}
+
+// ImagesToPrune returns the set of images that should be removed.
+func (p *ImagePruner) ImagesToPrune(ctx context.Context, opts ImagePruneOptions) ([]string, error) {
+ if opts.Mode == ImagePruneNone {
+ return nil, nil
+ } else if opts.Mode != ImagePruneLocal && opts.Mode != ImagePruneAll {
+ return nil, fmt.Errorf("unsupported image prune mode: %s", opts.Mode)
+ }
+ var images []string
+
+ if opts.Mode == ImagePruneAll {
+ namedImages, err := p.namedImages(ctx)
+ if err != nil {
+ return nil, err
+ }
+ images = append(images, namedImages...)
+ }
+
+ projectImages, err := p.labeledLocalImages(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, img := range projectImages {
+ if len(img.RepoTags) == 0 {
+ // currently, we're only pruning the tagged references, but
+ // if we start removing the dangling images and grouping by
+ // service, we can remove this (and should rely on `Image::ID`)
+ continue
+ }
+
+ var shouldPrune bool
+ if opts.RemoveOrphans {
+ // indiscriminately prune all project images even if they're not
+ // referenced by the current Compose state (e.g. the service was
+ // removed from YAML)
+ shouldPrune = true
+ } else {
+ // only prune the image if it belongs to a known service for the project.
+ if _, err := p.project.GetService(img.Labels[api.ServiceLabel]); err == nil {
+ shouldPrune = true
+ }
+ }
+
+ if shouldPrune {
+ images = append(images, img.RepoTags[0])
+ }
+ }
+
+ fallbackImages, err := p.unlabeledLocalImages(ctx)
+ if err != nil {
+ return nil, err
+ }
+ images = append(images, fallbackImages...)
+
+ images = normalizeAndDedupeImages(images)
+ return images, nil
+}
+
+// namedImages are those that are explicitly named in the service config.
+//
+// These could be registry-only images (no local build), hybrid (support build
+// as a fallback if the pull fails), or local-only (the image does not exist in a
+// registry).
+func (p *ImagePruner) namedImages(ctx context.Context) ([]string, error) {
+ var images []string
+ for _, service := range p.project.Services {
+ if service.Image == "" {
+ continue
+ }
+ images = append(images, service.Image)
+ }
+ return p.filterImagesByExistence(ctx, images)
+}
+
+// labeledLocalImages are images that were built locally by a recent version of
+// Compose (older versions did not always label built images).
+//
+// The image name could either have been defined by the user or implicitly
+// created from the project + service name.
+func (p *ImagePruner) labeledLocalImages(ctx context.Context) ([]image.Summary, error) {
+ imageListOpts := image.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(p.project.Name),
+ // TODO(milas): we should really clean up the dangling images as
+ // well (historically we have NOT); need to refactor this to handle
+ // it gracefully without producing confusing CLI output, i.e. we
+ // do not want to print out a bunch of untagged/dangling image IDs,
+ // they should be grouped into a logical operation for the relevant
+ // service
+ filters.Arg("dangling", "false"),
+ ),
+ }
+ projectImages, err := p.client.ImageList(ctx, imageListOpts)
+ if err != nil {
+ return nil, err
+ }
+ return projectImages, nil
+}
+
+// unlabeledLocalImages are images that match the implicit naming convention
+// for locally-built images but did not get labeled, presumably because they
+// were produced by an older version of Compose.
+//
+// This is transitional to ensure `down` continues to work as expected on
+// projects built/launched by previous versions of Compose. It can safely
+// be removed after some time.
+func (p *ImagePruner) unlabeledLocalImages(ctx context.Context) ([]string, error) {
+ var images []string
+ for _, service := range p.project.Services {
+ if service.Image != "" {
+ continue
+ }
+ img := api.GetImageNameOrDefault(service, p.project.Name)
+ images = append(images, img)
+ }
+ return p.filterImagesByExistence(ctx, images)
+}
+
+// filterImagesByExistence returns the subset of images that exist in the
+// engine store.
+//
+// NOTE: Any transient errors communicating with the API will result in an
+// image being returned as "existing", as this method is exclusively used to
+// find images to remove, so the worst case of being conservative here is an
+// attempt to remove an image that doesn't exist, which will cause a warning
+// but is otherwise harmless.
+func (p *ImagePruner) filterImagesByExistence(ctx context.Context, imageNames []string) ([]string, error) {
+ var mu sync.Mutex
+ var ret []string
+
+ eg, ctx := errgroup.WithContext(ctx)
+ for _, img := range imageNames {
+ eg.Go(func() error {
+ _, err := p.client.ImageInspect(ctx, img)
+ if errdefs.IsNotFound(err) {
+ // err on the side of caution: only skip if we successfully
+ // queried the API and got back a definitive "not exists"
+ return nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ ret = append(ret, img)
+ return nil
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// normalizeAndDedupeImages returns the unique set of images after normalization.
+func normalizeAndDedupeImages(images []string) []string {
+ seen := make(map[string]struct{}, len(images))
+ for _, img := range images {
+ // since some references come from user input (service.image) and some
+ // come from the engine API, we standardize them, opting for the
+ // familiar name format since they'll also be displayed in the CLI
+ ref, err := reference.ParseNormalizedNamed(img)
+ if err == nil {
+ ref = reference.TagNameOnly(ref)
+ img = reference.FamiliarString(ref)
+ }
+ seen[img] = struct{}{}
+ }
+ ret := make([]string, 0, len(seen))
+ for v := range seen {
+ ret = append(ret, v)
+ }
+ // ensure a deterministic return result - the actual ordering is not useful
+ sort.Strings(ret)
+ return ret
+}
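
normalizeAndDedupeImages is what lets `down --rmi` treat a user-written reference and the engine-reported reference as the same image. A small standalone example of the normalization it relies on (the image names are arbitrary):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Both spellings normalize to the same familiar, tagged form,
	// so they dedupe to a single entry: "ubuntu:latest".
	for _, img := range []string{"ubuntu", "docker.io/library/ubuntu:latest"} {
		ref, err := reference.ParseNormalizedNamed(img)
		if err != nil {
			panic(err)
		}
		ref = reference.TagNameOnly(ref)
		fmt.Println(reference.FamiliarString(ref))
	}
}
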
diff --git a/pkg/compose/images.go b/pkg/compose/images.go
index 787db8bcf78..f94eeba9789 100644
--- a/pkg/compose/images.go
+++ b/pkg/compose/images.go
@@ -19,92 +19,141 @@ package compose
import (
"context"
"fmt"
+ "slices"
"strings"
"sync"
+ "time"
- moby "github.com/docker/docker/api/types"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/platforms"
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/errdefs"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/docker/client"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Images(ctx context.Context, projectName string, options api.ImagesOptions) ([]api.ImageSummary, error) {
- allContainers, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
+func (s *composeService) Images(ctx context.Context, projectName string, options api.ImagesOptions) (map[string]api.ImageSummary, error) {
+ projectName = strings.ToLower(projectName)
+ allContainers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
All: true,
Filters: filters.NewArgs(projectFilter(projectName)),
})
if err != nil {
return nil, err
}
- containers := []moby.Container{}
+ var containers []container.Summary
if len(options.Services) > 0 {
// filter service containers
for _, c := range allContainers {
- if utils.StringContains(options.Services, c.Labels[api.ServiceLabel]) {
+ if slices.Contains(options.Services, c.Labels[api.ServiceLabel]) {
containers = append(containers, c)
-
}
}
} else {
containers = allContainers
}
- imageIDs := []string{}
- // aggregate image IDs
- for _, c := range containers {
- if !utils.StringContains(imageIDs, c.ImageID) {
- imageIDs = append(imageIDs, c.ImageID)
- }
- }
- images, err := s.getImages(ctx, imageIDs)
+ version, err := s.RuntimeVersion(ctx)
if err != nil {
return nil, err
}
- summary := make([]api.ImageSummary, len(containers))
- for i, container := range containers {
- img, ok := images[container.ImageID]
- if !ok {
- return nil, fmt.Errorf("failed to retrieve image for container %s", getCanonicalContainerName(container))
- }
+ withPlatform := versions.GreaterThanOrEqualTo(version, "1.49")
+
+ summary := map[string]api.ImageSummary{}
+ var mux sync.Mutex
+ eg, ctx := errgroup.WithContext(ctx)
+ for _, c := range containers {
+ eg.Go(func() error {
+ image, err := s.apiClient().ImageInspect(ctx, c.Image)
+ if err != nil {
+ return err
+ }
+ id := image.ID // platform-specific image ID can't be combined with image tag, see https://github.com/moby/moby/issues/49995
+
+ if withPlatform && c.ImageManifestDescriptor != nil && c.ImageManifestDescriptor.Platform != nil {
+ image, err = s.apiClient().ImageInspect(ctx, c.Image, client.ImageInspectWithPlatform(c.ImageManifestDescriptor.Platform))
+ if err != nil {
+ return err
+ }
+ }
- summary[i] = img
- summary[i].ContainerName = getCanonicalContainerName(container)
+ var repository, tag string
+ ref, err := reference.ParseDockerRef(c.Image)
+ if err == nil {
+ // ParseDockerRef will reject a local image ID
+ repository = reference.FamiliarName(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+
+ var created *time.Time
+ if image.Created != "" {
+ t, err := time.Parse(time.RFC3339Nano, image.Created)
+ if err != nil {
+ return err
+ }
+ created = &t
+ }
+
+ mux.Lock()
+ defer mux.Unlock()
+ summary[getCanonicalContainerName(c)] = api.ImageSummary{
+ ID: id,
+ Repository: repository,
+ Tag: tag,
+ Platform: platforms.Platform{
+ Architecture: image.Architecture,
+ OS: image.Os,
+ OSVersion: image.OsVersion,
+ Variant: image.Variant,
+ },
+ Size: image.Size,
+ Created: created,
+ LastTagTime: image.Metadata.LastTagTime,
+ }
+ return nil
+ })
}
- return summary, nil
+
+ err = eg.Wait()
+ return summary, err
}
-func (s *composeService) getImages(ctx context.Context, images []string) (map[string]api.ImageSummary, error) {
+func (s *composeService) getImageSummaries(ctx context.Context, repoTags []string) (map[string]api.ImageSummary, error) {
summary := map[string]api.ImageSummary{}
l := sync.Mutex{}
eg, ctx := errgroup.WithContext(ctx)
- for _, img := range images {
- img := img
+ for _, repoTag := range repoTags {
eg.Go(func() error {
- inspect, _, err := s.apiClient().ImageInspectWithRaw(ctx, img)
+ inspect, err := s.apiClient().ImageInspect(ctx, repoTag)
if err != nil {
if errdefs.IsNotFound(err) {
return nil
}
- return err
+ return fmt.Errorf("unable to get image '%s': %w", repoTag, err)
}
tag := ""
repository := ""
- if len(inspect.RepoTags) > 0 {
- repotag := strings.Split(inspect.RepoTags[0], ":")
- repository = repotag[0]
- if len(repotag) > 1 {
- tag = repotag[1]
+ ref, err := reference.ParseDockerRef(repoTag)
+ if err == nil {
+ // ParseDockerRef will reject a local image ID
+ repository = reference.FamiliarName(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
}
}
l.Lock()
- summary[img] = api.ImageSummary{
- ID: inspect.ID,
- Repository: repository,
- Tag: tag,
- Size: inspect.Size,
+ summary[repoTag] = api.ImageSummary{
+ ID: inspect.ID,
+ Repository: repository,
+ Tag: tag,
+ Size: inspect.Size,
+ LastTagTime: inspect.Metadata.LastTagTime,
}
l.Unlock()
return nil
diff --git a/pkg/compose/images_test.go b/pkg/compose/images_test.go
new file mode 100644
index 00000000000..9c0367d8d69
--- /dev/null
+++ b/pkg/compose/images_test.go
@@ -0,0 +1,110 @@
+/*
+ Copyright 2024 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
+
+ compose "github.com/docker/compose/v5/pkg/api"
+)
+
+func TestImages(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
+
+ ctx := context.Background()
+ args := filters.NewArgs(projectFilter(strings.ToLower(testProject)))
+ listOpts := container.ListOptions{All: true, Filters: args}
+ api.EXPECT().ServerVersion(gomock.Any()).Return(types.Version{APIVersion: "1.96"}, nil).AnyTimes()
+ timeStr1 := "2025-06-06T06:06:06.000000000Z"
+ created1, _ := time.Parse(time.RFC3339Nano, timeStr1)
+ timeStr2 := "2025-03-03T03:03:03.000000000Z"
+ created2, _ := time.Parse(time.RFC3339Nano, timeStr2)
+ image1 := imageInspect("image1", "foo:1", 12345, timeStr1)
+ image2 := imageInspect("image2", "bar:2", 67890, timeStr2)
+ api.EXPECT().ImageInspect(anyCancellableContext(), "foo:1").Return(image1, nil).MaxTimes(2)
+ api.EXPECT().ImageInspect(anyCancellableContext(), "bar:2").Return(image2, nil)
+ c1 := containerDetail("service1", "123", "running", "foo:1")
+ c2 := containerDetail("service1", "456", "running", "bar:2")
+ c2.Ports = []container.Port{{PublicPort: 80, PrivatePort: 90, IP: "localhost"}}
+ c3 := containerDetail("service2", "789", "exited", "foo:1")
+ api.EXPECT().ContainerList(ctx, listOpts).Return([]container.Summary{c1, c2, c3}, nil)
+
+ images, err := tested.Images(ctx, strings.ToLower(testProject), compose.ImagesOptions{})
+
+ expected := map[string]compose.ImageSummary{
+ "123": {
+ ID: "image1",
+ Repository: "foo",
+ Tag: "1",
+ Size: 12345,
+ Created: &created1,
+ },
+ "456": {
+ ID: "image2",
+ Repository: "bar",
+ Tag: "2",
+ Size: 67890,
+ Created: &created2,
+ },
+ "789": {
+ ID: "image1",
+ Repository: "foo",
+ Tag: "1",
+ Size: 12345,
+ Created: &created1,
+ },
+ }
+ assert.NilError(t, err)
+ assert.DeepEqual(t, images, expected)
+}
+
+func imageInspect(id string, imageReference string, size int64, created string) image.InspectResponse {
+ return image.InspectResponse{
+ ID: id,
+ RepoTags: []string{
+ "someRepo:someTag",
+ imageReference,
+ },
+ Size: size,
+ Created: created,
+ }
+}
+
+func containerDetail(service string, id string, status string, imageName string) container.Summary {
+ return container.Summary{
+ ID: id,
+ Names: []string{"/" + id},
+ Image: imageName,
+ Labels: containerLabels(service, false),
+ State: status,
+ }
+}
diff --git a/pkg/compose/kill.go b/pkg/compose/kill.go
index afba4f01d33..63d369902af 100644
--- a/pkg/compose/kill.go
+++ b/pkg/compose/kill.go
@@ -18,50 +18,57 @@ package compose
import (
"context"
- "fmt"
+ "strings"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Kill(ctx context.Context, project string, options api.KillOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.kill(ctx, project, options)
- })
+func (s *composeService) Kill(ctx context.Context, projectName string, options api.KillOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.kill(ctx, strings.ToLower(projectName), options)
+ }, "kill", s.events)
}
-func (s *composeService) kill(ctx context.Context, project string, options api.KillOptions) error {
- w := progress.ContextWriter(ctx)
-
+func (s *composeService) kill(ctx context.Context, projectName string, options api.KillOptions) error {
services := options.Services
var containers Containers
- containers, err := s.getContainers(ctx, project, oneOffInclude, false, services...)
+ containers, err := s.getContainers(ctx, projectName, oneOffInclude, options.All, services...)
if err != nil {
return err
}
+ project := options.Project
+ if project == nil {
+ project, err = s.getProjectWithResources(ctx, containers, projectName)
+ if err != nil {
+ return err
+ }
+ }
+
+ if !options.RemoveOrphans {
+ containers = containers.filter(isService(project.ServiceNames()...))
+ }
if len(containers) == 0 {
- fmt.Fprintf(s.stderr(), "no container to kill")
+ return api.ErrNoResources
}
eg, ctx := errgroup.WithContext(ctx)
- containers.
- forEach(func(container moby.Container) {
- eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.KillingEvent(eventName))
- err := s.apiClient().ContainerKill(ctx, container.ID, options.Signal)
- if err != nil {
- w.Event(progress.ErrorMessageEvent(eventName, "Error while Killing"))
- return err
- }
- w.Event(progress.KilledEvent(eventName))
- return nil
- })
+ containers.forEach(func(ctr container.Summary) {
+ eg.Go(func() error {
+ eventName := getContainerProgressName(ctr)
+ s.events.On(killingEvent(eventName))
+ err := s.apiClient().ContainerKill(ctx, ctr.ID, options.Signal)
+ if err != nil {
+ s.events.On(errorEvent(eventName, "Error while Killing"))
+ return err
+ }
+ s.events.On(killedEvent(eventName))
+ return nil
})
+ })
return eg.Wait()
}
diff --git a/pkg/compose/kill_test.go b/pkg/compose/kill_test.go
index 9680afe30f4..b25dc48e6f8 100644
--- a/pkg/compose/kill_test.go
+++ b/pkg/compose/kill_test.go
@@ -18,44 +18,53 @@ package compose
import (
"context"
+ "fmt"
"path/filepath"
"strings"
"testing"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
- "github.com/golang/mock/gomock"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/volume"
+ "go.uber.org/mock/gomock"
"gotest.tools/v3/assert"
- compose "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/mocks"
+ compose "github.com/docker/compose/v5/pkg/api"
)
const testProject = "testProject"
-var tested = composeService{}
-
func TestKillAll(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
name := strings.ToLower(testProject)
ctx := context.Background()
- api.EXPECT().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(projectFilter(name)),
+ api.EXPECT().ContainerList(ctx, container.ListOptions{
+ Filters: filters.NewArgs(projectFilter(name), hasConfigHashLabel()),
}).Return(
- []moby.Container{testContainer("service1", "123", false), testContainer("service1", "456", false), testContainer("service2", "789", false)}, nil)
+ []container.Summary{testContainer("service1", "123", false), testContainer("service1", "456", false), testContainer("service2", "789", false)}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {ID: "abc123", Name: "testProject_default"},
+ }, nil)
api.EXPECT().ContainerKill(anyCancellableContext(), "123", "").Return(nil)
api.EXPECT().ContainerKill(anyCancellableContext(), "456", "").Return(nil)
api.EXPECT().ContainerKill(anyCancellableContext(), "789", "").Return(nil)
- err := tested.kill(ctx, name, compose.KillOptions{})
+ err = tested.Kill(ctx, name, compose.KillOptions{})
assert.NilError(t, err)
}
@@ -64,40 +73,55 @@ func TestKillSignal(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
name := strings.ToLower(testProject)
- listOptions := moby.ContainerListOptions{
- Filters: filters.NewArgs(projectFilter(name), serviceFilter(serviceName)),
+ listOptions := container.ListOptions{
+ Filters: filters.NewArgs(projectFilter(name), serviceFilter(serviceName), hasConfigHashLabel()),
}
ctx := context.Background()
- api.EXPECT().ContainerList(ctx, listOptions).Return([]moby.Container{testContainer(serviceName, "123", false)}, nil)
+ api.EXPECT().ContainerList(ctx, listOptions).Return([]container.Summary{testContainer(serviceName, "123", false)}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{
+ {ID: "abc123", Name: "testProject_default"},
+ }, nil)
api.EXPECT().ContainerKill(anyCancellableContext(), "123", "SIGTERM").Return(nil)
- err := tested.kill(ctx, name, compose.KillOptions{Services: []string{serviceName}, Signal: "SIGTERM"})
+ err = tested.Kill(ctx, name, compose.KillOptions{Services: []string{serviceName}, Signal: "SIGTERM"})
assert.NilError(t, err)
}
-func testContainer(service string, id string, oneOff bool) moby.Container {
- return moby.Container{
+func testContainer(service string, id string, oneOff bool) container.Summary {
+ // canonical Docker names in the API start with a leading slash; some
+ // parts of Compose code will attempt to strip it off, so make sure
+ // it's consistently present
+ name := "/" + strings.TrimPrefix(id, "/")
+ return container.Summary{
ID: id,
- Names: []string{id},
+ Names: []string{name},
Labels: containerLabels(service, oneOff),
+ State: container.StateExited,
}
}
func containerLabels(service string, oneOff bool) map[string]string {
- workingdir, _ := filepath.Abs("testdata")
+ workingdir := "/src/pkg/compose/testdata"
composefile := filepath.Join(workingdir, "compose.yaml")
labels := map[string]string{
compose.ServiceLabel: service,
compose.ConfigFilesLabel: composefile,
compose.WorkingDirLabel: workingdir,
- compose.ProjectLabel: strings.ToLower(testProject)}
+ compose.ProjectLabel: strings.ToLower(testProject),
+ }
if oneOff {
labels[compose.OneoffLabel] = "True"
}
@@ -110,9 +134,16 @@ func anyCancellableContext() gomock.Matcher {
return gomock.AssignableToTypeOf(ctxWithCancel)
}
-func projectFilterListOpt() moby.ContainerListOptions {
- return moby.ContainerListOptions{
- Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+func projectFilterListOpt(withOneOff bool) container.ListOptions {
+ filter := filters.NewArgs(
+ projectFilter(strings.ToLower(testProject)),
+ hasConfigHashLabel(),
+ )
+ if !withOneOff {
+ filter.Add("label", fmt.Sprintf("%s=False", compose.OneoffLabel))
+ }
+ return container.ListOptions{
+ Filters: filter,
All: true,
}
}
diff --git a/pkg/compose/loader.go b/pkg/compose/loader.go
new file mode 100644
index 00000000000..41d414089bf
--- /dev/null
+++ b/pkg/compose/loader.go
@@ -0,0 +1,149 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/remote"
+)
+
+// LoadProject implements api.Compose.LoadProject
+// It loads and validates a Compose project from configuration files.
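+//
+// A minimal usage sketch (assuming a service obtained from NewComposeService, as in the
+// tests of this package; option values are illustrative only):
+//
+//	project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+//		ConfigPaths: []string{"compose.yaml"},
+//		ProjectName: "demo",
+//	})
+//	if err != nil {
+//		return err
+//	}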
+func (s *composeService) LoadProject(ctx context.Context, options api.ProjectLoadOptions) (*types.Project, error) {
+ // Setup remote loaders (Git, OCI)
+ remoteLoaders := s.createRemoteLoaders(options)
+
+ projectOptions, err := s.buildProjectOptions(options, remoteLoaders)
+ if err != nil {
+ return nil, err
+ }
+
+ // Register all user-provided listeners (e.g., for metrics collection)
+ for _, listener := range options.LoadListeners {
+ if listener != nil {
+ projectOptions.WithListeners(listener)
+ }
+ }
+
+ if options.Compatibility {
+ api.Separator = "_"
+ }
+
+ project, err := projectOptions.LoadProject(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Post-processing: service selection, environment resolution, etc.
+ project, err = s.postProcessProject(project, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return project, nil
+}
+
+// createRemoteLoaders creates Git and OCI remote loaders if not in offline mode
+func (s *composeService) createRemoteLoaders(options api.ProjectLoadOptions) []loader.ResourceLoader {
+ if options.Offline {
+ return nil
+ }
+ git := remote.NewGitRemoteLoader(s.dockerCli, options.Offline)
+ oci := remote.NewOCIRemoteLoader(s.dockerCli, options.Offline, options.OCI)
+ return []loader.ResourceLoader{git, oci}
+}
+
+// buildProjectOptions constructs compose-go ProjectOptions from API options
+func (s *composeService) buildProjectOptions(options api.ProjectLoadOptions, remoteLoaders []loader.ResourceLoader) (*cli.ProjectOptions, error) {
+ opts := []cli.ProjectOptionsFn{
+ cli.WithWorkingDirectory(options.WorkingDir),
+ cli.WithOsEnv,
+ }
+
+ // Add PWD if not present
+ if _, present := os.LookupEnv("PWD"); !present {
+ if pwd, err := os.Getwd(); err == nil {
+ opts = append(opts, cli.WithEnv([]string{"PWD=" + pwd}))
+ }
+ }
+
+ // Add remote loaders
+ for _, r := range remoteLoaders {
+ opts = append(opts, cli.WithResourceLoader(r))
+ }
+
+ opts = append(opts,
+ cli.WithEnvFiles(options.EnvFiles...),
+ cli.WithDotEnv,
+ cli.WithConfigFileEnv,
+ cli.WithDefaultConfigPath,
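+ // note: env files and dot-env are applied a second time below; the assumption is that the
+ // first pass feeds config discovery (e.g. COMPOSE_FILE), while the second re-reads them once
+ // the project directory has been resolved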
+ cli.WithEnvFiles(options.EnvFiles...),
+ cli.WithDotEnv,
+ cli.WithDefaultProfiles(options.Profiles...),
+ cli.WithName(options.ProjectName),
+ )
+
+ return cli.NewProjectOptions(options.ConfigPaths, append(options.ProjectOptionsFns, opts...)...)
+}
+
+// postProcessProject applies post-loading transformations to the project
+func (s *composeService) postProcessProject(project *types.Project, options api.ProjectLoadOptions) (*types.Project, error) {
+ if project.Name == "" {
+ return nil, errors.New("project name can't be empty. Use ProjectName option to set a valid name")
+ }
+
+ project, err := project.WithServicesEnabled(options.Services...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add custom labels
+ for name, svc := range project.Services {
+ svc.CustomLabels = map[string]string{
+ api.ProjectLabel: project.Name,
+ api.ServiceLabel: name,
+ api.VersionLabel: api.ComposeVersion,
+ api.WorkingDirLabel: project.WorkingDir,
+ api.ConfigFilesLabel: strings.Join(project.ComposeFiles, ","),
+ api.OneoffLabel: "False",
+ }
+ if len(options.EnvFiles) != 0 {
+ svc.CustomLabels[api.EnvironmentFileLabel] = strings.Join(options.EnvFiles, ",")
+ }
+ project.Services[name] = svc
+ }
+
+ project, err = project.WithSelectedServices(options.Services)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remove unnecessary resources if not All
+ if !options.All {
+ project = project.WithoutUnnecessaryResources()
+ }
+
+ return project, nil
+}
diff --git a/pkg/compose/loader_test.go b/pkg/compose/loader_test.go
new file mode 100644
index 00000000000..cdaa1ad68b5
--- /dev/null
+++ b/pkg/compose/loader_test.go
@@ -0,0 +1,343 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoadProject_Basic(t *testing.T) {
+ // Create a temporary compose file
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+name: test-project
+services:
+ web:
+ image: nginx:latest
+ ports:
+ - "8080:80"
+ db:
+ image: postgres:latest
+ environment:
+ POSTGRES_PASSWORD: secret
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ // Create compose service
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ // Load the project
+ ctx := context.Background()
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ })
+
+ // Assertions
+ require.NoError(t, err)
+ assert.NotNil(t, project)
+ assert.Equal(t, "test-project", project.Name)
+ assert.Len(t, project.Services, 2)
+ assert.Contains(t, project.Services, "web")
+ assert.Contains(t, project.Services, "db")
+
+ // Check labels were applied
+ webService := project.Services["web"]
+ assert.Equal(t, "test-project", webService.CustomLabels[api.ProjectLabel])
+ assert.Equal(t, "web", webService.CustomLabels[api.ServiceLabel])
+}
+
+func TestLoadProject_WithEnvironmentResolution(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ app:
+ image: myapp:latest
+ environment:
+ - TEST_VAR=${TEST_VAR}
+ - LITERAL_VAR=literal_value
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ // Set environment variable
+ require.NoError(t, os.Setenv("TEST_VAR", "resolved_value"))
+ t.Cleanup(func() {
+ require.NoError(t, os.Unsetenv("TEST_VAR"))
+ })
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Test with environment resolution (default)
+ t.Run("WithResolution", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ })
+ require.NoError(t, err)
+
+ appService := project.Services["app"]
+ // Environment should be resolved
+ assert.NotNil(t, appService.Environment["TEST_VAR"])
+ assert.Equal(t, "resolved_value", *appService.Environment["TEST_VAR"])
+ assert.NotNil(t, appService.Environment["LITERAL_VAR"])
+ assert.Equal(t, "literal_value", *appService.Environment["LITERAL_VAR"])
+ })
+
+ // Test without environment resolution
+ t.Run("WithoutResolution", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ ProjectOptionsFns: []cli.ProjectOptionsFn{cli.WithoutEnvironmentResolution},
+ })
+ require.NoError(t, err)
+
+ appService := project.Services["app"]
+ // Environment should NOT be resolved, keeping raw values
+ // Note: This depends on compose-go behavior, which may still have some resolution
+ assert.NotNil(t, appService.Environment)
+ })
+}
+
+func TestLoadProject_ServiceSelection(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ web:
+ image: nginx:latest
+ db:
+ image: postgres:latest
+ cache:
+ image: redis:latest
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Load only specific services
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ Services: []string{"web", "db"},
+ })
+
+ require.NoError(t, err)
+ assert.Len(t, project.Services, 2)
+ assert.Contains(t, project.Services, "web")
+ assert.Contains(t, project.Services, "db")
+ assert.NotContains(t, project.Services, "cache")
+}
+
+func TestLoadProject_WithProfiles(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ web:
+ image: nginx:latest
+ debug:
+ image: busybox:latest
+ profiles: ["debug"]
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Without debug profile
+ t.Run("WithoutProfile", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ })
+ require.NoError(t, err)
+ assert.Len(t, project.Services, 1)
+ assert.Contains(t, project.Services, "web")
+ })
+
+ // With debug profile
+ t.Run("WithProfile", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ Profiles: []string{"debug"},
+ })
+ require.NoError(t, err)
+ assert.Len(t, project.Services, 2)
+ assert.Contains(t, project.Services, "web")
+ assert.Contains(t, project.Services, "debug")
+ })
+}
+
+func TestLoadProject_WithLoadListeners(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ web:
+ image: nginx:latest
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Track events received
+ var events []string
+ listener := func(event string, metadata map[string]any) {
+ events = append(events, event)
+ }
+
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ LoadListeners: []api.LoadListener{listener},
+ })
+
+ require.NoError(t, err)
+ assert.NotNil(t, project)
+
+ // Listeners may or may not have been invoked; the exact events depend on the
+ // compose-go implementation, so we only assert that loading succeeded
+ _ = events
+}
+
+func TestLoadProject_ProjectNameInference(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ web:
+ image: nginx:latest
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Without explicit project name
+ t.Run("InferredName", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ })
+ require.NoError(t, err)
+ // Project name should be inferred from directory
+ assert.NotEmpty(t, project.Name)
+ })
+
+ // With explicit project name
+ t.Run("ExplicitName", func(t *testing.T) {
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ ProjectName: "my-custom-project",
+ })
+ require.NoError(t, err)
+ assert.Equal(t, "my-custom-project", project.Name)
+ })
+}
+
+func TestLoadProject_Compatibility(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+services:
+ web:
+ image: nginx:latest
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // restore the package-level separator even if an assertion below fails
+ t.Cleanup(func() {
+ api.Separator = "-"
+ })
+
+ // With compatibility mode
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ Compatibility: true,
+ })
+
+ require.NoError(t, err)
+ assert.NotNil(t, project)
+ // In compatibility mode, separator should be "_"
+ assert.Equal(t, "_", api.Separator)
+}
+
+func TestLoadProject_InvalidComposeFile(t *testing.T) {
+ tmpDir := t.TempDir()
+ composeFile := filepath.Join(tmpDir, "compose.yaml")
+ composeContent := `
+this is not valid yaml: [[[
+`
+ err := os.WriteFile(composeFile, []byte(composeContent), 0o644)
+ require.NoError(t, err)
+
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Should return an error for invalid YAML
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{composeFile},
+ })
+
+ require.Error(t, err)
+ assert.Nil(t, project)
+}
+
+func TestLoadProject_MissingComposeFile(t *testing.T) {
+ service, err := NewComposeService(nil)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ // Should return an error for missing file
+ project, err := service.LoadProject(ctx, api.ProjectLoadOptions{
+ ConfigPaths: []string{"/nonexistent/compose.yaml"},
+ })
+
+ require.Error(t, err)
+ assert.Nil(t, project)
+}
diff --git a/pkg/compose/logs.go b/pkg/compose/logs.go
index 8d58af91a1d..c633f5c311f 100644
--- a/pkg/compose/logs.go
+++ b/pkg/compose/logs.go
@@ -20,68 +20,109 @@ import (
"context"
"io"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
- "github.com/docker/docker/api/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/stdcopy"
+ "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
-func (s *composeService) Logs(ctx context.Context, projectName string, consumer api.LogConsumer, options api.LogOptions) error {
- containers, err := s.getContainers(ctx, projectName, oneOffExclude, true, options.Services...)
- if err != nil {
- return err
+func (s *composeService) Logs(
+ ctx context.Context,
+ projectName string,
+ consumer api.LogConsumer,
+ options api.LogOptions,
+) error {
+ var containers Containers
+ var err error
+
+ if options.Index > 0 {
+ ctr, err := s.getSpecifiedContainer(ctx, projectName, oneOffExclude, true, options.Services[0], options.Index)
+ if err != nil {
+ return err
+ }
+ containers = append(containers, ctr)
+ } else {
+ containers, err = s.getContainers(ctx, projectName, oneOffExclude, true, options.Services...)
+ if err != nil {
+ return err
+ }
+ }
+
+ if options.Project != nil && len(options.Services) == 0 {
+ // we run with an explicit compose.yaml, so only consider services defined in this file
+ options.Services = options.Project.ServiceNames()
+ containers = containers.filter(isService(options.Services...))
}
eg, ctx := errgroup.WithContext(ctx)
- for _, c := range containers {
- c := c
+ for _, ctr := range containers {
eg.Go(func() error {
- return s.logContainers(ctx, consumer, c, options)
+ err := s.logContainer(ctx, consumer, ctr, options)
+ if errdefs.IsNotImplemented(err) {
+ logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(ctr), err.Error())
+ return nil
+ }
+ return err
})
}
if options.Follow {
printer := newLogPrinter(consumer)
- eg.Go(func() error {
- for _, c := range containers {
- printer.HandleEvent(api.ContainerEvent{
- Type: api.ContainerEventAttach,
- Container: getContainerNameWithoutProject(c),
- Service: c.Labels[api.ServiceLabel],
+
+ monitor := newMonitor(s.apiClient(), projectName)
+ if len(options.Services) > 0 {
+ monitor.withServices(options.Services)
+ } else if options.Project != nil {
+ monitor.withServices(options.Project.ServiceNames())
+ }
+ monitor.withListener(printer.HandleEvent)
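+ // when a container is (re)started while we are following, attach to it and stream its logs
+ // from its own start time so lines printed before the restart are not duplicated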
+ monitor.withListener(func(event api.ContainerEvent) {
+ if event.Type == api.ContainerEventStarted {
+ eg.Go(func() error {
+ ctr, err := s.apiClient().ContainerInspect(ctx, event.ID)
+ if err != nil {
+ return err
+ }
+
+ err = s.doLogContainer(ctx, consumer, event.Source, ctr, api.LogOptions{
+ Follow: options.Follow,
+ Since: ctr.State.StartedAt,
+ Until: options.Until,
+ Tail: options.Tail,
+ Timestamps: options.Timestamps,
+ })
+ if errdefs.IsNotImplemented(err) {
+ // ignore
+ return nil
+ }
+ return err
})
}
- return nil
})
-
eg.Go(func() error {
- return s.watchContainers(ctx, projectName, options.Services, printer.HandleEvent, containers, func(c types.Container) error {
- printer.HandleEvent(api.ContainerEvent{
- Type: api.ContainerEventAttach,
- Container: getContainerNameWithoutProject(c),
- Service: c.Labels[api.ServiceLabel],
- })
- return s.logContainers(ctx, consumer, c, options)
- })
- })
-
- eg.Go(func() error {
- _, err := printer.Run(ctx, false, "", nil)
- return err
+ // pass ctx so monitor will immediately stop on SIGINT
+ return monitor.Start(ctx)
})
}
return eg.Wait()
}
-func (s *composeService) logContainers(ctx context.Context, consumer api.LogConsumer, c types.Container, options api.LogOptions) error {
- cnt, err := s.apiClient().ContainerInspect(ctx, c.ID)
+func (s *composeService) logContainer(ctx context.Context, consumer api.LogConsumer, c container.Summary, options api.LogOptions) error {
+ ctr, err := s.apiClient().ContainerInspect(ctx, c.ID)
if err != nil {
return err
}
+ name := getContainerNameWithoutProject(c)
+ return s.doLogContainer(ctx, consumer, name, ctr, options)
+}
- service := c.Labels[api.ServiceLabel]
- r, err := s.apiClient().ContainerLogs(ctx, cnt.ID, types.ContainerLogsOptions{
+func (s *composeService) doLogContainer(ctx context.Context, consumer api.LogConsumer, name string, ctr container.InspectResponse, options api.LogOptions) error {
+ r, err := s.apiClient().ContainerLogs(ctx, ctr.ID, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: options.Follow,
@@ -93,13 +134,12 @@ func (s *composeService) logContainers(ctx context.Context, consumer api.LogCons
if err != nil {
return err
}
- defer r.Close() // nolint errcheck
+ defer r.Close() //nolint:errcheck
- name := getContainerNameWithoutProject(c)
w := utils.GetWriter(func(line string) {
- consumer.Log(name, service, line)
+ consumer.Log(name, line)
})
- if cnt.Config.Tty {
+ if ctr.Config.Tty {
_, err = io.Copy(w, r)
} else {
_, err = stdcopy.StdCopy(w, w, r)
diff --git a/pkg/compose/logs_test.go b/pkg/compose/logs_test.go
new file mode 100644
index 00000000000..d2292a753f7
--- /dev/null
+++ b/pkg/compose/logs_test.go
@@ -0,0 +1,194 @@
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "io"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ containerType "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+
+ compose "github.com/docker/compose/v5/pkg/api"
+)
+
+func TestComposeService_Logs_Demux(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ require.NoError(t, err)
+
+ name := strings.ToLower(testProject)
+
+ ctx := context.Background()
+ api.EXPECT().ContainerList(ctx, containerType.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(oneOffFilter(false), projectFilter(name), hasConfigHashLabel()),
+ }).Return(
+ []containerType.Summary{
+ testContainer("service", "c", false),
+ },
+ nil,
+ )
+
+ api.EXPECT().
+ ContainerInspect(anyCancellableContext(), "c").
+ Return(containerType.InspectResponse{
+ ContainerJSONBase: &containerType.ContainerJSONBase{ID: "c"},
+ Config: &containerType.Config{Tty: false},
+ }, nil)
+ c1Reader, c1Writer := io.Pipe()
+ t.Cleanup(func() {
+ _ = c1Reader.Close()
+ _ = c1Writer.Close()
+ })
+ c1Stdout := stdcopy.NewStdWriter(c1Writer, stdcopy.Stdout)
+ c1Stderr := stdcopy.NewStdWriter(c1Writer, stdcopy.Stderr)
+ go func() {
+ _, err := c1Stdout.Write([]byte("hello stdout\n"))
+ assert.NoError(t, err, "Writing to fake stdout")
+ _, err = c1Stderr.Write([]byte("hello stderr\n"))
+ assert.NoError(t, err, "Writing to fake stderr")
+ _ = c1Writer.Close()
+ }()
+ api.EXPECT().ContainerLogs(anyCancellableContext(), "c", gomock.Any()).
+ Return(c1Reader, nil)
+
+ opts := compose.LogOptions{
+ Project: &types.Project{
+ Services: types.Services{
+ "service": {Name: "service"},
+ },
+ },
+ }
+
+ consumer := &testLogConsumer{}
+ err = tested.Logs(ctx, name, consumer, opts)
+ require.NoError(t, err)
+
+ require.Equal(
+ t,
+ []string{"hello stdout", "hello stderr"},
+ consumer.LogsForContainer("c"),
+ )
+}
+
+// TestComposeService_Logs_ServiceFiltering ensures that we do not include
+// logs from out-of-scope services based on the Compose file vs actual state.
+//
+// NOTE(milas): This test exists because each method is currently duplicating
+// a lot of the project/service filtering logic. We should consider moving it
+// to an earlier point in the loading process, at which point this test could
+// safely be removed.
+func TestComposeService_Logs_ServiceFiltering(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ require.NoError(t, err)
+
+ name := strings.ToLower(testProject)
+
+ ctx := context.Background()
+ api.EXPECT().ContainerList(ctx, containerType.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(oneOffFilter(false), projectFilter(name), hasConfigHashLabel()),
+ }).Return(
+ []containerType.Summary{
+ testContainer("serviceA", "c1", false),
+ testContainer("serviceA", "c2", false),
+ // serviceB will be filtered out by the project definition to
+ // ensure we ignore "orphan" containers
+ testContainer("serviceB", "c3", false),
+ testContainer("serviceC", "c4", false),
+ },
+ nil,
+ )
+
+ for _, id := range []string{"c1", "c2", "c4"} {
+ api.EXPECT().
+ ContainerInspect(anyCancellableContext(), id).
+ Return(
+ containerType.InspectResponse{
+ ContainerJSONBase: &containerType.ContainerJSONBase{ID: id},
+ Config: &containerType.Config{Tty: true},
+ },
+ nil,
+ )
+ api.EXPECT().ContainerLogs(anyCancellableContext(), id, gomock.Any()).
+ Return(io.NopCloser(strings.NewReader("hello "+id+"\n")), nil).
+ Times(1)
+ }
+
+ // this simulates passing `--file` with a Compose file that does NOT
+ // reference `serviceB` even though it has running containers for this project
+ proj := &types.Project{
+ Services: types.Services{
+ "serviceA": {Name: "serviceA"},
+ "serviceC": {Name: "serviceC"},
+ },
+ }
+ consumer := &testLogConsumer{}
+ opts := compose.LogOptions{
+ Project: proj,
+ }
+ err = tested.Logs(ctx, name, consumer, opts)
+ require.NoError(t, err)
+
+ require.Equal(t, []string{"hello c1"}, consumer.LogsForContainer("c1"))
+ require.Equal(t, []string{"hello c2"}, consumer.LogsForContainer("c2"))
+ require.Empty(t, consumer.LogsForContainer("c3"))
+ require.Equal(t, []string{"hello c4"}, consumer.LogsForContainer("c4"))
+}
+
+type testLogConsumer struct {
+ mu sync.Mutex
+ // logs is keyed by the container name passed to Log; values are individual log lines
+ logs map[string][]string
+}
+
+func (l *testLogConsumer) Log(containerName, message string) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.logs == nil {
+ l.logs = make(map[string][]string)
+ }
+ l.logs[containerName] = append(l.logs[containerName], message)
+}
+
+func (l *testLogConsumer) Err(containerName, message string) {
+ l.Log(containerName, message)
+}
+
+func (l *testLogConsumer) Status(containerName, msg string) {}
+
+func (l *testLogConsumer) LogsForContainer(containerName string) []string {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.logs[containerName]
+}
diff --git a/pkg/compose/ls.go b/pkg/compose/ls.go
index 942827ef8a6..f4fab837f23 100644
--- a/pkg/compose/ls.go
+++ b/pkg/compose/ls.go
@@ -19,19 +19,19 @@ package compose
import (
"context"
"fmt"
+ "slices"
"sort"
"strings"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/utils"
-
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
+ "github.com/sirupsen/logrus"
)
func (s *composeService) List(ctx context.Context, opts api.ListOptions) ([]api.Stack, error) {
- list, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(hasProjectLabelFilter()),
+ list, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filters.NewArgs(hasProjectLabelFilter(), hasConfigHashLabel()),
All: opts.All,
})
if err != nil {
@@ -41,7 +41,7 @@ func (s *composeService) List(ctx context.Context, opts api.ListOptions) ([]api.
return containersToStacks(list)
}
-func containersToStacks(containers []moby.Container) ([]api.Stack, error) {
+func containersToStacks(containers []container.Summary) ([]api.Stack, error) {
containersByLabel, keys, err := groupContainerByLabel(containers, api.ProjectLabel)
if err != nil {
return nil, err
@@ -50,7 +50,8 @@ func containersToStacks(containers []moby.Container) ([]api.Stack, error) {
for _, project := range keys {
configFiles, err := combinedConfigFiles(containersByLabel[project])
if err != nil {
- return nil, err
+ logrus.Warn(err.Error())
+ configFiles = "N/A"
}
projects = append(projects, api.Stack{
@@ -63,17 +64,17 @@ func containersToStacks(containers []moby.Container) ([]api.Stack, error) {
return projects, nil
}
-func combinedConfigFiles(containers []moby.Container) (string, error) {
+func combinedConfigFiles(containers []container.Summary) (string, error) {
configFiles := []string{}
for _, c := range containers {
files, ok := c.Labels[api.ConfigFilesLabel]
if !ok {
- return "", fmt.Errorf("No label %q set on container %q of compose project", api.ConfigFilesLabel, c.ID)
+ return "", fmt.Errorf("no label %q set on container %q of compose project", api.ConfigFilesLabel, c.ID)
}
for _, f := range strings.Split(files, ",") {
- if !utils.StringContains(configFiles, f) {
+ if !slices.Contains(configFiles, f) {
configFiles = append(configFiles, f)
}
}
@@ -82,7 +83,7 @@ func combinedConfigFiles(containers []moby.Container) (string, error) {
return strings.Join(configFiles, ","), nil
}
-func containerToState(containers []moby.Container) []string {
+func containerToState(containers []container.Summary) []string {
statuses := []string{}
for _, c := range containers {
statuses = append(statuses, c.State)
@@ -106,24 +107,24 @@ func combinedStatus(statuses []string) string {
for _, status := range keys {
nb := nbByStatus[status]
if result != "" {
- result = result + ", "
+ result += ", "
}
- result = result + fmt.Sprintf("%s(%d)", status, nb)
+ result += fmt.Sprintf("%s(%d)", status, nb)
}
return result
}
-func groupContainerByLabel(containers []moby.Container, labelName string) (map[string][]moby.Container, []string, error) {
- containersByLabel := map[string][]moby.Container{}
+func groupContainerByLabel(containers []container.Summary, labelName string) (map[string][]container.Summary, []string, error) {
+ containersByLabel := map[string][]container.Summary{}
keys := []string{}
for _, c := range containers {
label, ok := c.Labels[labelName]
if !ok {
- return nil, nil, fmt.Errorf("No label %q set on container %q of compose project", labelName, c.ID)
+ return nil, nil, fmt.Errorf("no label %q set on container %q of compose project", labelName, c.ID)
}
labelContainers, ok := containersByLabel[label]
if !ok {
- labelContainers = []moby.Container{}
+ labelContainers = []container.Summary{}
keys = append(keys, label)
}
labelContainers = append(labelContainers, c)
diff --git a/pkg/compose/ls_test.go b/pkg/compose/ls_test.go
index 0d3b9dd5b9c..d3505a63aec 100644
--- a/pkg/compose/ls_test.go
+++ b/pkg/compose/ls_test.go
@@ -20,14 +20,14 @@ import (
"fmt"
"testing"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
- moby "github.com/docker/docker/api/types"
"gotest.tools/v3/assert"
)
func TestContainersToStacks(t *testing.T) {
- containers := []moby.Container{
+ containers := []container.Summary{
{
ID: "service1",
State: "running",
@@ -69,7 +69,7 @@ func TestStacksMixedStatus(t *testing.T) {
}
func TestCombinedConfigFiles(t *testing.T) {
- containersByLabel := map[string][]moby.Container{
+ containersByLabel := map[string][]container.Summary{
"project1": {
{
ID: "service1",
@@ -104,7 +104,7 @@ func TestCombinedConfigFiles(t *testing.T) {
}{
"project1": {ConfigFiles: "/home/docker-compose.yaml", Error: nil},
"project2": {ConfigFiles: "/home/project2-docker-compose.yaml", Error: nil},
- "project3": {ConfigFiles: "", Error: fmt.Errorf("No label %q set on container %q of compose project", api.ConfigFilesLabel, "service4")},
+ "project3": {ConfigFiles: "", Error: fmt.Errorf("no label %q set on container %q of compose project", api.ConfigFilesLabel, "service4")},
}
for project, containers := range containersByLabel {
@@ -113,7 +113,7 @@ func TestCombinedConfigFiles(t *testing.T) {
expected := testData[project]
if expected.Error != nil {
- assert.Equal(t, err.Error(), expected.Error.Error())
+ assert.Error(t, err, expected.Error.Error())
} else {
assert.Equal(t, err, expected.Error)
}
diff --git a/pkg/compose/metrics.go b/pkg/compose/metrics.go
deleted file mode 100644
index 2cdc927ea62..00000000000
--- a/pkg/compose/metrics.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package compose
-
-// FailureCategory sruct regrouping metrics failure status and specific exit code
-type FailureCategory struct {
- MetricsStatus string
- ExitCode int
-}
-
-const (
- // APISource is sent for API metrics
- APISource = "api"
- // SuccessStatus command success
- SuccessStatus = "success"
- // FailureStatus command failure
- FailureStatus = "failure"
- // ComposeParseFailureStatus failure while parsing compose file
- ComposeParseFailureStatus = "failure-compose-parse"
- // FileNotFoundFailureStatus failure getting compose file
- FileNotFoundFailureStatus = "failure-file-not-found"
- // CommandSyntaxFailureStatus failure reading command
- CommandSyntaxFailureStatus = "failure-cmd-syntax"
- // BuildFailureStatus failure building imge
- BuildFailureStatus = "failure-build"
- // PullFailureStatus failure pulling imge
- PullFailureStatus = "failure-pull"
- // CanceledStatus command canceled
- CanceledStatus = "canceled"
-)
-
-var (
- // FileNotFoundFailure failure for compose file not found
- FileNotFoundFailure = FailureCategory{MetricsStatus: FileNotFoundFailureStatus, ExitCode: 14}
- // ComposeParseFailure failure for composefile parse error
- ComposeParseFailure = FailureCategory{MetricsStatus: ComposeParseFailureStatus, ExitCode: 15}
- // CommandSyntaxFailure failure for command line syntax
- CommandSyntaxFailure = FailureCategory{MetricsStatus: CommandSyntaxFailureStatus, ExitCode: 16}
- //BuildFailure failure while building images.
- BuildFailure = FailureCategory{MetricsStatus: BuildFailureStatus, ExitCode: 17}
- // PullFailure failure while pulling image
- PullFailure = FailureCategory{MetricsStatus: PullFailureStatus, ExitCode: 18}
-)
-
-//ByExitCode retrieve FailureCategory based on command exit code
-func ByExitCode(exitCode int) FailureCategory {
- switch exitCode {
- case 0:
- return FailureCategory{MetricsStatus: SuccessStatus, ExitCode: 0}
- case 14:
- return FileNotFoundFailure
- case 15:
- return ComposeParseFailure
- case 16:
- return CommandSyntaxFailure
- case 17:
- return BuildFailure
- case 18:
- return PullFailure
- case 130:
- return FailureCategory{MetricsStatus: CanceledStatus, ExitCode: exitCode}
- default:
- return FailureCategory{MetricsStatus: FailureStatus, ExitCode: exitCode}
- }
-}
diff --git a/pkg/compose/model.go b/pkg/compose/model.go
new file mode 100644
index 00000000000..10af138753c
--- /dev/null
+++ b/pkg/compose/model.go
@@ -0,0 +1,263 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
+)
+
+func (s *composeService) ensureModels(ctx context.Context, project *types.Project, quietPull bool) error {
+ if len(project.Models) == 0 {
+ return nil
+ }
+
+ api, err := s.newModelAPI(project)
+ if err != nil {
+ return err
+ }
+ defer api.Close()
+ availableModels, err := api.ListModels(ctx)
+ if err != nil {
+ return err
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return api.SetModelVariables(ctx, project)
+ })
+
+ for name, config := range project.Models {
+ if config.Name == "" {
+ config.Name = name
+ }
+ eg.Go(func() error {
+ if !slices.Contains(availableModels, config.Model) {
+ // use a goroutine-local error so concurrent goroutines don't race on the enclosing err
+ if err := api.PullModel(ctx, config, quietPull, s.events); err != nil {
+ return err
+ }
+ }
+ return api.ConfigureModel(ctx, config, s.events)
+ })
+ }
+ return eg.Wait()
+}
+
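+// modelAPI shells out to the Docker Model CLI plugin to list, pull and configure the models
+// declared in the Compose file; prepare and cleanup wire each spawned command to the same
+// Docker endpoint as the current CLI session.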
+type modelAPI struct {
+ path string
+ env []string
+ prepare func(ctx context.Context, cmd *exec.Cmd) error
+ cleanup func()
+}
+
+func (s *composeService) newModelAPI(project *types.Project) (*modelAPI, error) {
+ dockerModel, err := manager.GetPlugin("model", s.dockerCli, &cobra.Command{})
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ return nil, fmt.Errorf("'models' support requires Docker Model plugin")
+ }
+ return nil, err
+ }
+ endpoint, cleanup, err := s.propagateDockerEndpoint()
+ if err != nil {
+ return nil, err
+ }
+ return &modelAPI{
+ path: dockerModel.Path,
+ prepare: func(ctx context.Context, cmd *exec.Cmd) error {
+ return s.prepareShellOut(ctx, project.Environment, cmd)
+ },
+ cleanup: cleanup,
+ env: append(project.Environment.Values(), endpoint...),
+ }, nil
+}
+
+func (m *modelAPI) Close() {
+ m.cleanup()
+}
+
+func (m *modelAPI) PullModel(ctx context.Context, model types.ModelConfig, quietPull bool, events api.EventProcessor) error {
+ events.On(api.Resource{
+ ID: model.Name,
+ Status: api.Working,
+ Text: "Pulling",
+ })
+
+ cmd := exec.CommandContext(ctx, m.path, "pull", model.Model)
+ err := m.prepare(ctx, cmd)
+ if err != nil {
+ return err
+ }
+ stream, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ return err
+ }
+
+ scanner := bufio.NewScanner(stream)
+ for scanner.Scan() {
+ msg := scanner.Text()
+ if msg == "" {
+ continue
+ }
+
+ if !quietPull {
+ events.On(api.Resource{
+ ID: model.Name,
+ Status: api.Working,
+ Text: api.StatusPulling,
+ })
+ }
+ }
+
+ err = cmd.Wait()
+ if err != nil {
+ events.On(errorEvent(model.Name, err.Error()))
+ return err
+ }
+ events.On(api.Resource{
+ ID: model.Name,
+ Status: api.Working,
+ Text: api.StatusPulled,
+ })
+ return nil
+}
+
+func (m *modelAPI) ConfigureModel(ctx context.Context, config types.ModelConfig, events api.EventProcessor) error {
+ events.On(api.Resource{
+ ID: config.Name,
+ Status: api.Working,
+ Text: "Configuring",
+ })
+ // configure [--context-size=<n>] MODEL [-- <runtime-flags...>]
+ args := []string{"configure"}
+ if config.ContextSize > 0 {
+ args = append(args, "--context-size", strconv.Itoa(config.ContextSize))
+ }
+ args = append(args, config.Model)
+ if len(config.RuntimeFlags) != 0 {
+ args = append(args, "--")
+ args = append(args, config.RuntimeFlags...)
+ }
+ cmd := exec.CommandContext(ctx, m.path, args...)
+ err := m.prepare(ctx, cmd)
+ if err != nil {
+ return err
+ }
+ return cmd.Run()
+}
+
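+// SetModelVariables resolves the model runner endpoint via the plugin's `status --json` output
+// and injects, for each service referencing a model, environment variables for the model name
+// and endpoint (<REF>_MODEL and <REF>_URL by default, unless overridden in the model config).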
+func (m *modelAPI) SetModelVariables(ctx context.Context, project *types.Project) error {
+ cmd := exec.CommandContext(ctx, m.path, "status", "--json")
+ err := m.prepare(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ statusOut, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("error checking docker-model status: %w", err)
+ }
+ type Status struct {
+ Endpoint string `json:"endpoint"`
+ }
+
+ var status Status
+ err = json.Unmarshal(statusOut, &status)
+ if err != nil {
+ return err
+ }
+
+ for _, service := range project.Services {
+ for ref, modelConfig := range service.Models {
+ model := project.Models[ref]
+ varPrefix := strings.ReplaceAll(strings.ToUpper(ref), "-", "_")
+ var variable string
+ if modelConfig != nil && modelConfig.ModelVariable != "" {
+ variable = modelConfig.ModelVariable
+ } else {
+ variable = varPrefix + "_MODEL"
+ }
+ service.Environment[variable] = &model.Model
+
+ if modelConfig != nil && modelConfig.EndpointVariable != "" {
+ variable = modelConfig.EndpointVariable
+ } else {
+ variable = varPrefix + "_URL"
+ }
+ service.Environment[variable] = &status.Endpoint
+ }
+ }
+ return nil
+}
+
+type Model struct {
+ Id string `json:"id"`
+ Tags []string `json:"tags"`
+ Created int `json:"created"`
+ Config struct {
+ Format string `json:"format"`
+ Quantization string `json:"quantization"`
+ Parameters string `json:"parameters"`
+ Architecture string `json:"architecture"`
+ Size string `json:"size"`
+ } `json:"config"`
+}
+
+func (m *modelAPI) ListModels(ctx context.Context) ([]string, error) {
+ cmd := exec.CommandContext(ctx, m.path, "ls", "--json")
+ err := m.prepare(ctx, cmd)
+ if err != nil {
+ return nil, err
+ }
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("error checking available models: %w", err)
+ }
+
+ type AvailableModel struct {
+ Id string `json:"id"`
+ Tags []string `json:"tags"`
+ Created int `json:"created"`
+ }
+
+ models := []AvailableModel{}
+ err = json.Unmarshal(output, &models)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling available models: %w", err)
+ }
+ var availableModels []string
+ for _, model := range models {
+ availableModels = append(availableModels, model.Tags...)
+ }
+ return availableModels, nil
+}
diff --git a/pkg/compose/monitor.go b/pkg/compose/monitor.go
new file mode 100644
index 00000000000..e7e70c88308
--- /dev/null
+++ b/pkg/compose/monitor.go
@@ -0,0 +1,215 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/containerd/errdefs"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/client"
+ "github.com/sirupsen/logrus"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
+)
+
+type monitor struct {
+ apiClient client.APIClient
+ project string
+ // services tells us which services to consider and which to ignore, as they may be run by a concurrent compose command
+ services map[string]bool
+ listeners []api.ContainerEventListener
+}
+
+func newMonitor(apiClient client.APIClient, project string) *monitor {
+ return &monitor{
+ apiClient: apiClient,
+ project: project,
+ services: map[string]bool{},
+ }
+}
+
+func (c *monitor) withServices(services []string) {
+ for _, name := range services {
+ c.services[name] = true
+ }
+}
+
+// Start runs the monitor to detect application events and returns once all tracked containers
+// have terminated or the context is cancelled
+//
+//nolint:gocyclo
+func (c *monitor) Start(ctx context.Context) error {
+ // collect the initial set of application containers
+ initialState, err := c.apiClient.ContainerList(ctx, container.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(
+ projectFilter(c.project),
+ oneOffFilter(false),
+ hasConfigHashLabel(),
+ ),
+ })
+ if err != nil {
+ return err
+ }
+
+ // containers is the set of container IDs the application is based on
+ containers := utils.Set[string]{}
+ for _, ctr := range initialState {
+ if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
+ containers.Add(ctr.ID)
+ }
+ }
+ restarting := utils.Set[string]{}
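+ // restarting tracks containers that exited but are expected to be restarted by the engine,
+ // so their next start event is reported as a restart rather than a fresh start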
+
+ evtCh, errCh := c.apiClient.Events(ctx, events.ListOptions{
+ Filters: filters.NewArgs(
+ filters.Arg("type", "container"),
+ projectFilter(c.project)),
+ })
+ for {
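+ // stop monitoring once every tracked container has exited and been removed from the set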
+ if len(containers) == 0 {
+ return nil
+ }
+ select {
+ case <-ctx.Done():
+ return nil
+ case err := <-errCh:
+ return err
+ case event := <-evtCh:
+ if len(c.services) > 0 && !c.services[event.Actor.Attributes[api.ServiceLabel]] {
+ continue
+ }
+ ctr, err := c.getContainerSummary(event)
+ if err != nil {
+ return err
+ }
+
+ switch event.Action {
+ case events.ActionCreate:
+ if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
+ containers.Add(ctr.ID)
+ }
+ evtType := api.ContainerEventCreated
+ if _, ok := ctr.Labels[api.ContainerReplaceLabel]; ok {
+ evtType = api.ContainerEventRecreated
+ }
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, evtType))
+ }
+ logrus.Debugf("container %s created", ctr.Name)
+ case events.ActionStart:
+ restarted := restarting.Has(ctr.ID)
+ if restarted {
+ logrus.Debugf("container %s restarted", ctr.Name)
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted, func(e *api.ContainerEvent) {
+ e.Restarting = restarted
+ }))
+ }
+ } else {
+ logrus.Debugf("container %s started", ctr.Name)
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted))
+ }
+ }
+ if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
+ containers.Add(ctr.ID)
+ }
+ case events.ActionRestart:
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventRestarted))
+ }
+ logrus.Debugf("container %s restarted", ctr.Name)
+ case events.ActionDie:
+ logrus.Debugf("container %s exited with code %d", ctr.Name, ctr.ExitCode)
+ inspect, err := c.apiClient.ContainerInspect(ctx, event.Actor.ID)
+ if errdefs.IsNotFound(err) {
+ // Source is already removed
+ } else if err != nil {
+ return err
+ }
+
+ if inspect.State != nil && (inspect.State.Restarting || inspect.State.Running) {
+ // State.Restarting is set by the engine when a container is configured to restart on exit.
+ // On ContainerRestart it isn't (see https://github.com/moby/moby/issues/45538) and the
+ // container state is still reported as "running"
+ logrus.Debugf("container %s is restarting", ctr.Name)
+ restarting.Add(ctr.ID)
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited, func(e *api.ContainerEvent) {
+ e.Restarting = true
+ }))
+ }
+ } else {
+ for _, listener := range c.listeners {
+ listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited))
+ }
+ containers.Remove(ctr.ID)
+ }
+ }
+ }
+ }
+}
+
+func newContainerEvent(timeNano int64, ctr *api.ContainerSummary, eventType int, opts ...func(e *api.ContainerEvent)) api.ContainerEvent {
+ name := ctr.Name
+ defaultName := getDefaultContainerName(ctr.Project, ctr.Labels[api.ServiceLabel], ctr.Labels[api.ContainerNumberLabel])
+ if name == defaultName {
+ // remove project- prefix
+ name = name[len(ctr.Project)+1:]
+ }
+
+ event := api.ContainerEvent{
+ Type: eventType,
+ Container: ctr,
+ Time: timeNano,
+ Source: name,
+ ID: ctr.ID,
+ Service: ctr.Service,
+ ExitCode: ctr.ExitCode,
+ }
+ for _, opt := range opts {
+ opt(&event)
+ }
+ return event
+}
+
+func (c *monitor) getContainerSummary(event events.Message) (*api.ContainerSummary, error) {
+ ctr := &api.ContainerSummary{
+ ID: event.Actor.ID,
+ Name: event.Actor.Attributes["name"],
+ Project: c.project,
+ Service: event.Actor.Attributes[api.ServiceLabel],
+ Labels: event.Actor.Attributes, // More than just labels, but that's the closest the API gives us
+ }
+ if ec, ok := event.Actor.Attributes["exitCode"]; ok {
+ exitCode, err := strconv.Atoi(ec)
+ if err != nil {
+ return nil, err
+ }
+ ctr.ExitCode = exitCode
+ }
+ return ctr, nil
+}
+
+func (c *monitor) withListener(listener api.ContainerEventListener) {
+ c.listeners = append(c.listeners, listener)
+}
diff --git a/pkg/compose/pause.go b/pkg/compose/pause.go
index 1f91217ed6b..35d476eeac2 100644
--- a/pkg/compose/pause.go
+++ b/pkg/compose/pause.go
@@ -18,66 +18,70 @@ package compose
import (
"context"
+ "strings"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Pause(ctx context.Context, project string, options api.PauseOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.pause(ctx, project, options)
- })
+func (s *composeService) Pause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.pause(ctx, strings.ToLower(projectName), options)
+ }, "pause", s.events)
}
-func (s *composeService) pause(ctx context.Context, project string, options api.PauseOptions) error {
- containers, err := s.getContainers(ctx, project, oneOffExclude, false, options.Services...)
+func (s *composeService) pause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ containers, err := s.getContainers(ctx, projectName, oneOffExclude, false, options.Services...)
if err != nil {
return err
}
- w := progress.ContextWriter(ctx)
+ if options.Project != nil {
+ containers = containers.filter(isService(options.Project.ServiceNames()...))
+ }
+
eg, ctx := errgroup.WithContext(ctx)
- containers.forEach(func(container moby.Container) {
+ containers.forEach(func(container container.Summary) {
eg.Go(func() error {
err := s.apiClient().ContainerPause(ctx, container.ID)
if err == nil {
eventName := getContainerProgressName(container)
- w.Event(progress.NewEvent(eventName, progress.Done, "Paused"))
+ s.events.On(newEvent(eventName, api.Done, "Paused"))
}
return err
})
-
})
return eg.Wait()
}
-func (s *composeService) UnPause(ctx context.Context, project string, options api.PauseOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.unPause(ctx, project, options)
- })
+func (s *composeService) UnPause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.unPause(ctx, strings.ToLower(projectName), options)
+ }, "unpause", s.events)
}
-func (s *composeService) unPause(ctx context.Context, project string, options api.PauseOptions) error {
- containers, err := s.getContainers(ctx, project, oneOffExclude, false, options.Services...)
+func (s *composeService) unPause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ containers, err := s.getContainers(ctx, projectName, oneOffExclude, false, options.Services...)
if err != nil {
return err
}
- w := progress.ContextWriter(ctx)
+ if options.Project != nil {
+ containers = containers.filter(isService(options.Project.ServiceNames()...))
+ }
+
eg, ctx := errgroup.WithContext(ctx)
- containers.forEach(func(container moby.Container) {
+ containers.forEach(func(ctr container.Summary) {
eg.Go(func() error {
- err = s.apiClient().ContainerUnpause(ctx, container.ID)
+ err := s.apiClient().ContainerUnpause(ctx, ctr.ID)
if err == nil {
- eventName := getContainerProgressName(container)
- w.Event(progress.NewEvent(eventName, progress.Done, "Unpaused"))
+ eventName := getContainerProgressName(ctr)
+ s.events.On(newEvent(eventName, api.Done, "Unpaused"))
}
return err
})
-
})
return eg.Wait()
}
diff --git a/pkg/compose/plugins.go b/pkg/compose/plugins.go
new file mode 100644
index 00000000000..98bec357977
--- /dev/null
+++ b/pkg/compose/plugins.go
@@ -0,0 +1,282 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
+ "github.com/docker/cli/cli-plugins/manager"
+ "github.com/docker/cli/cli/config"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+)
+
+type JsonMessage struct {
+ Type string `json:"type"`
+ Message string `json:"message"`
+}
+
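+// A provider plugin is expected to emit one JSON message per line on stdout; an illustrative
+// exchange (values are examples only) looks like:
+//
+//	{"type": "info", "message": "waiting for resource"}
+//	{"type": "setenv", "message": "URL=https://example.com"}
+//	{"type": "error", "message": "provisioning failed"}
+//
+// The recognized message types are listed below.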
+const (
+ ErrorType = "error"
+ InfoType = "info"
+ SetEnvType = "setenv"
+ DebugType = "debug"
+ providerMetadataDirectory = "compose/providers"
+)
+
+var mux sync.Mutex
+
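+// runPlugin shells out to an external provider plugin, relays its progress messages as compose
+// events, and injects any "setenv" variables it reports into the environment of services that
+// depend on the provider-backed service.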
+func (s *composeService) runPlugin(ctx context.Context, project *types.Project, service types.ServiceConfig, command string) error {
+ provider := *service.Provider
+
+ plugin, err := s.getPluginBinaryPath(provider.Type)
+ if err != nil {
+ return err
+ }
+
+ cmd, err := s.setupPluginCommand(ctx, project, service, plugin, command)
+ if err != nil {
+ return err
+ }
+
+ variables, err := s.executePlugin(cmd, command, service)
+ if err != nil {
+ return err
+ }
+
+ mux.Lock()
+ defer mux.Unlock()
+ for name, s := range project.Services {
+ if _, ok := s.DependsOn[service.Name]; ok {
+ prefix := strings.ToUpper(service.Name) + "_"
+ for key, val := range variables {
+ s.Environment[prefix+key] = &val
+ }
+ project.Services[name] = s
+ }
+ }
+ return nil
+}
+
+func (s *composeService) executePlugin(cmd *exec.Cmd, command string, service types.ServiceConfig) (types.Mapping, error) {
+ var action string
+ switch command {
+ case "up":
+ s.events.On(creatingEvent(service.Name))
+ action = "create"
+ case "down":
+ s.events.On(removingEvent(service.Name))
+ action = "remove"
+ default:
+ return nil, fmt.Errorf("unsupported plugin command: %s", command)
+ }
+
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ decoder := json.NewDecoder(stdout)
+ defer func() { _ = stdout.Close() }()
+
+ variables := types.Mapping{}
+
+ for {
+ var msg JsonMessage
+ err = decoder.Decode(&msg)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ switch msg.Type {
+ case ErrorType:
+ s.events.On(newEvent(service.Name, api.Error, msg.Message))
+ return nil, errors.New(msg.Message)
+ case InfoType:
+ s.events.On(newEvent(service.Name, api.Working, msg.Message))
+ case SetEnvType:
+ key, val, found := strings.Cut(msg.Message, "=")
+ if !found {
+ return nil, fmt.Errorf("invalid response from plugin: %s", msg.Message)
+ }
+ variables[key] = val
+ case DebugType:
+ logrus.Debugf("%s: %s", service.Name, msg.Message)
+ default:
+ return nil, fmt.Errorf("invalid response from plugin: %s", msg.Type)
+ }
+ }
+
+ err = cmd.Wait()
+ if err != nil {
+ s.events.On(errorEvent(service.Name, err.Error()))
+ return nil, fmt.Errorf("failed to %s service provider: %s", action, err.Error())
+ }
+ switch command {
+ case "up":
+ s.events.On(createdEvent(service.Name))
+ case "down":
+ s.events.On(removedEvent(service.Name))
+ }
+ return variables, nil
+}
+
+func (s *composeService) getPluginBinaryPath(provider string) (path string, err error) {
+ if provider == "compose" {
+ return "", errors.New("'compose' is not a valid provider type")
+ }
+ plugin, err := manager.GetPlugin(provider, s.dockerCli, &cobra.Command{})
+ if err == nil {
+ path = plugin.Path
+ }
+ if errdefs.IsNotFound(err) {
+ path, err = exec.LookPath(executable(provider))
+ }
+ return path, err
+}
+
+func (s *composeService) setupPluginCommand(ctx context.Context, project *types.Project, service types.ServiceConfig, path, command string) (*exec.Cmd, error) {
+ cmdOptionsMetadata := s.getPluginMetadata(path, service.Provider.Type, project)
+ var currentCommandMetadata CommandMetadata
+ switch command {
+ case "up":
+ currentCommandMetadata = cmdOptionsMetadata.Up
+ case "down":
+ currentCommandMetadata = cmdOptionsMetadata.Down
+ }
+
+ provider := *service.Provider
+ commandMetadataIsEmpty := cmdOptionsMetadata.IsEmpty()
+ if err := currentCommandMetadata.CheckRequiredParameters(provider); !commandMetadataIsEmpty && err != nil {
+ return nil, err
+ }
+
+ args := []string{"compose", fmt.Sprintf("--project-name=%s", project.Name), command}
+ for k, v := range provider.Options {
+ for _, value := range v {
+ if _, ok := currentCommandMetadata.GetParameter(k); commandMetadataIsEmpty || ok {
+ args = append(args, fmt.Sprintf("--%s=%s", k, value))
+ }
+ }
+ }
+ args = append(args, service.Name)
+
+ cmd := exec.CommandContext(ctx, path, args...)
+
+ err := s.prepareShellOut(ctx, project.Environment, cmd)
+ if err != nil {
+ return nil, err
+ }
+ return cmd, nil
+}
+
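+// getPluginMetadata invokes `<plugin> compose metadata`, decodes the ProviderMetadata it
+// prints on stdout, and caches the raw JSON under the docker config directory; failures
+// are only logged at debug level and an empty value is returned.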
+func (s *composeService) getPluginMetadata(path, command string, project *types.Project) ProviderMetadata {
+ cmd := exec.Command(path, "compose", "metadata")
+ err := s.prepareShellOut(context.Background(), project.Environment, cmd)
+ if err != nil {
+ logrus.Debugf("failed to prepare plugin metadata command: %v", err)
+ return ProviderMetadata{}
+ }
+ stdout := &bytes.Buffer{}
+ cmd.Stdout = stdout
+
+ if err := cmd.Run(); err != nil {
+ logrus.Debugf("failed to start plugin metadata command: %v", err)
+ return ProviderMetadata{}
+ }
+
+ var metadata ProviderMetadata
+ if err := json.Unmarshal(stdout.Bytes(), &metadata); err != nil {
+ output, _ := io.ReadAll(stdout)
+ logrus.Debugf("failed to decode plugin metadata: %v - %s", err, output)
+ return ProviderMetadata{}
+ }
+ // Save the metadata into the docker configuration directory so it can be used by the Docker LSP tool.
+ // Errors are only logged, as saving the metadata is not critical to the main flow.
+ metadataDir := filepath.Join(config.Dir(), providerMetadataDirectory)
+ if err := os.MkdirAll(metadataDir, 0o700); err == nil {
+ metadataFilePath := filepath.Join(metadataDir, command+".json")
+ if err := os.WriteFile(metadataFilePath, stdout.Bytes(), 0o600); err != nil {
+ logrus.Debugf("failed to save plugin metadata: %v", err)
+ }
+ } else {
+ logrus.Debugf("failed to create plugin metadata directory: %v", err)
+ }
+ return metadata
+}
+
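+// ProviderMetadata describes a provider plugin's up and down commands as reported by
+// its `compose metadata` subcommand.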
+type ProviderMetadata struct {
+ Description string `json:"description"`
+ Up CommandMetadata `json:"up"`
+ Down CommandMetadata `json:"down"`
+}
+
+func (p ProviderMetadata) IsEmpty() bool {
+ return p.Description == "" && p.Up.Parameters == nil && p.Down.Parameters == nil
+}
+
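+// CommandMetadata lists the parameters accepted by a single provider command.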
+type CommandMetadata struct {
+ Parameters []ParameterMetadata `json:"parameters"`
+}
+
+type ParameterMetadata struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Required bool `json:"required"`
+ Type string `json:"type"`
+ Default string `json:"default,omitempty"`
+}
+
+func (c CommandMetadata) GetParameter(paramName string) (ParameterMetadata, bool) {
+ for _, p := range c.Parameters {
+ if p.Name == paramName {
+ return p, true
+ }
+ }
+ return ParameterMetadata{}, false
+}
+
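+// CheckRequiredParameters verifies that every required parameter is set in the provider options.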
+func (c CommandMetadata) CheckRequiredParameters(provider types.ServiceProviderConfig) error {
+ for _, p := range c.Parameters {
+ if p.Required {
+ if _, ok := provider.Options[p.Name]; !ok {
+ return fmt.Errorf("required parameter %q is missing from provider %q definition", p.Name, provider.Type)
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/compose/plugins_windows.go b/pkg/compose/plugins_windows.go
new file mode 100644
index 00000000000..327b3e4c102
--- /dev/null
+++ b/pkg/compose/plugins_windows.go
@@ -0,0 +1,23 @@
+//go:build windows
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
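+// executable returns the provider binary name on Windows, suffixed with ".exe".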
+func executable(s string) string {
+ return s + ".exe"
+}
diff --git a/pkg/compose/port.go b/pkg/compose/port.go
index d6035462c66..350369e6cc1 100644
--- a/pkg/compose/port.go
+++ b/pkg/compose/port.go
@@ -19,32 +19,36 @@ package compose
import (
"context"
"fmt"
+ "strings"
- "github.com/docker/compose/v2/pkg/api"
-
- moby "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
)
-func (s *composeService) Port(ctx context.Context, project string, service string, port int, options api.PortOptions) (string, int, error) {
- list, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
- Filters: filters.NewArgs(
- projectFilter(project),
- serviceFilter(service),
- containerNumberFilter(options.Index),
- ),
- })
+func (s *composeService) Port(ctx context.Context, projectName string, service string, port uint16, options api.PortOptions) (string, int, error) {
+ projectName = strings.ToLower(projectName)
+ ctr, err := s.getSpecifiedContainer(ctx, projectName, oneOffInclude, false, service, options.Index)
if err != nil {
return "", 0, err
}
- if len(list) == 0 {
- return "", 0, fmt.Errorf("no container found for %s_%d", service, options.Index)
- }
- container := list[0]
- for _, p := range container.Ports {
- if p.PrivatePort == uint16(port) && p.Type == options.Protocol {
+ for _, p := range ctr.Ports {
+ if p.PrivatePort == port && p.Type == options.Protocol {
return p.IP, int(p.PublicPort), nil
}
}
- return "", 0, err
+ return "", 0, portNotFoundError(options.Protocol, port, ctr)
+}
+
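+// portNotFoundError reports that the requested port is not published by the container,
+// listing the ports it does expose.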
+func portNotFoundError(protocol string, port uint16, ctr container.Summary) error {
+ formatPort := func(protocol string, port uint16) string {
+ return fmt.Sprintf("%d/%s", port, protocol)
+ }
+
+ var containerPorts []string
+ for _, p := range ctr.Ports {
+ containerPorts = append(containerPorts, formatPort(p.Type, p.PublicPort))
+ }
+
+ name := strings.TrimPrefix(ctr.Names[0], "/")
+ return fmt.Errorf("no port %s for container %s: %s", formatPort(protocol, port), name, strings.Join(containerPorts, ", "))
}
diff --git a/pkg/compose/printer.go b/pkg/compose/printer.go
index 7942c56411c..6fe6b9fda6e 100644
--- a/pkg/compose/printer.go
+++ b/pkg/compose/printer.go
@@ -17,101 +17,41 @@
package compose
import (
- "context"
"fmt"
- "github.com/docker/compose/v2/pkg/api"
-
- "github.com/sirupsen/logrus"
+ "github.com/docker/compose/v5/pkg/api"
)
-// logPrinter watch application containers an collect their logs
+// logPrinter watches application containers and collects their logs
type logPrinter interface {
HandleEvent(event api.ContainerEvent)
- Run(ctx context.Context, cascadeStop bool, exitCodeFrom string, stopFn func() error) (int, error)
- Cancel()
+}
+
+type printer struct {
+ consumer api.LogConsumer
}
// newLogPrinter builds a LogPrinter passing containers logs to LogConsumer
func newLogPrinter(consumer api.LogConsumer) logPrinter {
- queue := make(chan api.ContainerEvent)
printer := printer{
consumer: consumer,
- queue: queue,
}
return &printer
}
-func (p *printer) Cancel() {
- p.queue <- api.ContainerEvent{
- Type: api.UserCancel,
- }
-}
-
-type printer struct {
- queue chan api.ContainerEvent
- consumer api.LogConsumer
-}
-
func (p *printer) HandleEvent(event api.ContainerEvent) {
- p.queue <- event
-}
-
-//nolint:gocyclo
-func (p *printer) Run(ctx context.Context, cascadeStop bool, exitCodeFrom string, stopFn func() error) (int, error) {
- var (
- aborting bool
- exitCode int
- )
- containers := map[string]struct{}{}
- for {
- select {
- case <-ctx.Done():
- return exitCode, ctx.Err()
- case event := <-p.queue:
- container := event.Container
- switch event.Type {
- case api.UserCancel:
- aborting = true
- case api.ContainerEventAttach:
- if _, ok := containers[container]; ok {
- continue
- }
- containers[container] = struct{}{}
- p.consumer.Register(container)
- case api.ContainerEventExit, api.ContainerEventStopped:
- if !event.Restarting {
- delete(containers, container)
- }
- if !aborting {
- p.consumer.Status(container, fmt.Sprintf("exited with code %d", event.ExitCode))
- }
- if cascadeStop {
- if !aborting {
- aborting = true
- fmt.Println("Aborting on container exit...")
- err := stopFn()
- if err != nil {
- return 0, err
- }
- }
- if exitCodeFrom == "" {
- exitCodeFrom = event.Service
- }
- if exitCodeFrom == event.Service {
- logrus.Error(event.ExitCode)
- exitCode = event.ExitCode
- }
- }
- if len(containers) == 0 {
- // Last container terminated, done
- return exitCode, nil
- }
- case api.ContainerEventLog:
- if !aborting {
- p.consumer.Log(container, event.Service, event.Line)
- }
- }
+ switch event.Type {
+ case api.ContainerEventExited:
+ if event.Restarting {
+ p.consumer.Status(event.Source, fmt.Sprintf("exited with code %d (restarting)", event.ExitCode))
+ } else {
+ p.consumer.Status(event.Source, fmt.Sprintf("exited with code %d", event.ExitCode))
}
+ case api.ContainerEventRecreated:
+ p.consumer.Status(event.Container.Labels[api.ContainerReplaceLabel], "has been recreated")
+ case api.ContainerEventLog, api.HookEventLog:
+ p.consumer.Log(event.Source, event.Line)
+ case api.ContainerEventErr:
+ p.consumer.Err(event.Source, event.Line)
}
}
diff --git a/pkg/compose/progress.go b/pkg/compose/progress.go
new file mode 100644
index 00000000000..26f9b5d8590
--- /dev/null
+++ b/pkg/compose/progress.go
@@ -0,0 +1,176 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+type progressFunc func(context.Context) error
+
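+// Run executes pf while reporting start and completion of the named operation on the event bus.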
+func Run(ctx context.Context, pf progressFunc, operation string, bus api.EventProcessor) error {
+ bus.Start(ctx, operation)
+ err := pf(ctx)
+ bus.Done(operation, err != nil)
+ return err
+}
+
+// errorEvent creates a new Error Resource with the given message
+func errorEvent(id string, msg string) api.Resource {
+ return api.Resource{
+ ID: id,
+ Status: api.Error,
+ Text: api.StatusError,
+ Details: msg,
+ }
+}
+
+// errorEventf creates a new Error Resource with a formatted message
+func errorEventf(id string, msg string, args ...any) api.Resource {
+ return errorEvent(id, fmt.Sprintf(msg, args...))
+}
+
+// creatingEvent creates a new Creating in progress Resource
+func creatingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusCreating)
+}
+
+// startingEvent creates a new Starting in progress Resource
+func startingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusStarting)
+}
+
+// startedEvent creates a new Started (done) Resource
+func startedEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusStarted)
+}
+
+// waiting creates a new waiting event
+func waiting(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusWaiting)
+}
+
+// healthy creates a new healthy event
+func healthy(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusHealthy)
+}
+
+// exited creates a new exited event
+func exited(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusExited)
+}
+
+// restartingEvent creates a new Restarting in progress Resource
+func restartingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusRestarting)
+}
+
+// runningEvent creates a new Running (done) Resource
+func runningEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusRunning)
+}
+
+// createdEvent creates a new Created (done) Resource
+func createdEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusCreated)
+}
+
+// stoppingEvent creates a new Stopping in progress Resource
+func stoppingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusStopping)
+}
+
+// stoppedEvent creates a new Stopped (done) Resource
+func stoppedEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusStopped)
+}
+
+// killingEvent creates a new Killing in progress Resource
+func killingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusKilling)
+}
+
+// killedEvent creates a new Killed (done) Resource
+func killedEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusKilled)
+}
+
+// removingEvent creates a new Removing in progress Resource
+func removingEvent(id string) api.Resource {
+ return newEvent(id, api.Working, api.StatusRemoving)
+}
+
+// removedEvent creates a new removed (done) Resource
+func removedEvent(id string) api.Resource {
+ return newEvent(id, api.Done, api.StatusRemoved)
+}
+
+// buildingEvent creates a new Building in progress Resource
+func buildingEvent(id string) api.Resource {
+ return newEvent("Image "+id, api.Working, api.StatusBuilding)
+}
+
+// builtEvent creates a new built (done) Resource
+func builtEvent(id string) api.Resource {
+ return newEvent("Image "+id, api.Done, api.StatusBuilt)
+}
+
+// pullingEvent creates a new pulling (in progress) Resource
+func pullingEvent(id string) api.Resource {
+ return newEvent("Image "+id, api.Working, api.StatusPulling)
+}
+
+// pulledEvent creates a new pulled (done) Resource
+func pulledEvent(id string) api.Resource {
+ return newEvent("Image "+id, api.Done, api.StatusPulled)
+}
+
+// skippedEvent creates a new Skipped Resource
+func skippedEvent(id string, reason string) api.Resource {
+ return api.Resource{
+ ID: id,
+ Status: api.Warning,
+ Text: "Skipped: " + reason,
+ }
+}
+
+// newEvent creates a new Resource with the given status and text; an optional reason is stored in Details
+func newEvent(id string, status api.EventStatus, text string, reason ...string) api.Resource {
+ r := api.Resource{
+ ID: id,
+ Status: status,
+ Text: text,
+ }
+ if len(reason) > 0 {
+ r.Details = reason[0]
+ }
+ return r
+}
+
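+// ignore is an event processor that discards all progress events.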
+type ignore struct{}
+
+func (q *ignore) Start(_ context.Context, _ string) {
+}
+
+func (q *ignore) Done(_ string, _ bool) {
+}
+
+func (q *ignore) On(_ ...api.Resource) {
+}
diff --git a/pkg/compose/ps.go b/pkg/compose/ps.go
index 7a4e0b7cc1f..fb32358aebe 100644
--- a/pkg/compose/ps.go
+++ b/pkg/compose/ps.go
@@ -19,70 +19,107 @@ package compose
import (
"context"
"sort"
+ "strings"
+ "github.com/docker/docker/api/types/container"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
)
func (s *composeService) Ps(ctx context.Context, projectName string, options api.PsOptions) ([]api.ContainerSummary, error) {
+ projectName = strings.ToLower(projectName)
oneOff := oneOffExclude
if options.All {
oneOff = oneOffInclude
}
- containers, err := s.getContainers(ctx, projectName, oneOff, true, options.Services...)
+ containers, err := s.getContainers(ctx, projectName, oneOff, options.All, options.Services...)
if err != nil {
return nil, err
}
+ if len(options.Services) != 0 {
+ containers = containers.filter(isService(options.Services...))
+ }
summary := make([]api.ContainerSummary, len(containers))
eg, ctx := errgroup.WithContext(ctx)
- for i, container := range containers {
- i, container := i, container
+ for i, ctr := range containers {
eg.Go(func() error {
- var publishers []api.PortPublisher
- sort.Slice(container.Ports, func(i, j int) bool {
- return container.Ports[i].PrivatePort < container.Ports[j].PrivatePort
+ publishers := make([]api.PortPublisher, len(ctr.Ports))
+ sort.Slice(ctr.Ports, func(i, j int) bool {
+ return ctr.Ports[i].PrivatePort < ctr.Ports[j].PrivatePort
})
- for _, p := range container.Ports {
- publishers = append(publishers, api.PortPublisher{
+ for i, p := range ctr.Ports {
+ publishers[i] = api.PortPublisher{
URL: p.IP,
TargetPort: int(p.PrivatePort),
PublishedPort: int(p.PublicPort),
Protocol: p.Type,
- })
+ }
}
- inspect, err := s.apiClient().ContainerInspect(ctx, container.ID)
+ inspect, err := s.apiClient().ContainerInspect(ctx, ctr.ID)
if err != nil {
return err
}
var (
- health string
+ health container.HealthStatus
exitCode int
)
if inspect.State != nil {
switch inspect.State.Status {
- case "running":
+ case container.StateRunning:
if inspect.State.Health != nil {
health = inspect.State.Health.Status
}
- case "exited", "dead":
+ case container.StateExited, container.StateDead:
exitCode = inspect.State.ExitCode
}
}
+ var (
+ local int
+ mounts []string
+ )
+ for _, m := range ctr.Mounts {
+ name := m.Name
+ if name == "" {
+ name = m.Source
+ }
+ if m.Driver == "local" {
+ local++
+ }
+ mounts = append(mounts, name)
+ }
+
+ var networks []string
+ if ctr.NetworkSettings != nil {
+ for k := range ctr.NetworkSettings.Networks {
+ networks = append(networks, k)
+ }
+ }
+
summary[i] = api.ContainerSummary{
- ID: container.ID,
- Name: getCanonicalContainerName(container),
- Project: container.Labels[api.ProjectLabel],
- Service: container.Labels[api.ServiceLabel],
- Command: container.Command,
- State: container.State,
- Health: health,
- ExitCode: exitCode,
- Publishers: publishers,
+ ID: ctr.ID,
+ Name: getCanonicalContainerName(ctr),
+ Names: ctr.Names,
+ Image: ctr.Image,
+ Project: ctr.Labels[api.ProjectLabel],
+ Service: ctr.Labels[api.ServiceLabel],
+ Command: ctr.Command,
+ State: ctr.State,
+ Status: ctr.Status,
+ Created: ctr.Created,
+ Labels: ctr.Labels,
+ SizeRw: ctr.SizeRw,
+ SizeRootFs: ctr.SizeRootFs,
+ Mounts: mounts,
+ LocalVolumes: local,
+ Networks: networks,
+ Health: health,
+ ExitCode: exitCode,
+ Publishers: publishers,
}
return nil
})
diff --git a/pkg/compose/ps_test.go b/pkg/compose/ps_test.go
index 2f17616e481..c76bfdfd235 100644
--- a/pkg/compose/ps_test.go
+++ b/pkg/compose/ps_test.go
@@ -21,34 +21,31 @@ import (
"strings"
"testing"
- "github.com/golang/mock/gomock"
- "gotest.tools/v3/assert"
-
- moby "github.com/docker/docker/api/types"
+ containerType "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
- compose "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/mocks"
+ compose "github.com/docker/compose/v5/pkg/api"
)
func TestPs(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
ctx := context.Background()
- args := filters.NewArgs(projectFilter(strings.ToLower(testProject)))
+ args := filters.NewArgs(projectFilter(strings.ToLower(testProject)), hasConfigHashLabel())
args.Add("label", "com.docker.compose.oneoff=False")
- listOpts := moby.ContainerListOptions{Filters: args, All: true}
- c1, inspect1 := containerDetails("service1", "123", "running", "healthy", 0)
- c2, inspect2 := containerDetails("service1", "456", "running", "", 0)
- c2.Ports = []moby.Port{{PublicPort: 80, PrivatePort: 90, IP: "localhost"}}
- c3, inspect3 := containerDetails("service2", "789", "exited", "", 130)
- api.EXPECT().ContainerList(ctx, listOpts).Return([]moby.Container{c1, c2, c3}, nil)
+ listOpts := containerType.ListOptions{Filters: args, All: false}
+ c1, inspect1 := containerDetails("service1", "123", containerType.StateRunning, containerType.Healthy, 0)
+ c2, inspect2 := containerDetails("service1", "456", containerType.StateRunning, "", 0)
+ c2.Ports = []containerType.Port{{PublicPort: 80, PrivatePort: 90, IP: "localhost"}}
+ c3, inspect3 := containerDetails("service2", "789", containerType.StateExited, "", 130)
+ api.EXPECT().ContainerList(ctx, listOpts).Return([]containerType.Summary{c1, c2, c3}, nil)
api.EXPECT().ContainerInspect(anyCancellableContext(), "123").Return(inspect1, nil)
api.EXPECT().ContainerInspect(anyCancellableContext(), "456").Return(inspect2, nil)
api.EXPECT().ContainerInspect(anyCancellableContext(), "789").Return(inspect3, nil)
@@ -56,22 +53,64 @@ func TestPs(t *testing.T) {
containers, err := tested.Ps(ctx, strings.ToLower(testProject), compose.PsOptions{})
expected := []compose.ContainerSummary{
- {ID: "123", Name: "123", Project: strings.ToLower(testProject), Service: "service1", State: "running", Health: "healthy", Publishers: nil},
- {ID: "456", Name: "456", Project: strings.ToLower(testProject), Service: "service1", State: "running", Health: "", Publishers: []compose.PortPublisher{{URL: "localhost", TargetPort: 90,
- PublishedPort: 80}}},
- {ID: "789", Name: "789", Project: strings.ToLower(testProject), Service: "service2", State: "exited", Health: "", ExitCode: 130, Publishers: nil},
+ {
+ ID: "123", Name: "123", Names: []string{"/123"}, Image: "foo", Project: strings.ToLower(testProject), Service: "service1",
+ State: containerType.StateRunning,
+ Health: containerType.Healthy,
+ Publishers: []compose.PortPublisher{},
+ Labels: map[string]string{
+ compose.ProjectLabel: strings.ToLower(testProject),
+ compose.ConfigFilesLabel: "/src/pkg/compose/testdata/compose.yaml",
+ compose.WorkingDirLabel: "/src/pkg/compose/testdata",
+ compose.ServiceLabel: "service1",
+ },
+ },
+ {
+ ID: "456", Name: "456", Names: []string{"/456"}, Image: "foo", Project: strings.ToLower(testProject), Service: "service1",
+ State: containerType.StateRunning,
+ Health: "",
+ Publishers: []compose.PortPublisher{{URL: "localhost", TargetPort: 90, PublishedPort: 80}},
+ Labels: map[string]string{
+ compose.ProjectLabel: strings.ToLower(testProject),
+ compose.ConfigFilesLabel: "/src/pkg/compose/testdata/compose.yaml",
+ compose.WorkingDirLabel: "/src/pkg/compose/testdata",
+ compose.ServiceLabel: "service1",
+ },
+ },
+ {
+ ID: "789", Name: "789", Names: []string{"/789"}, Image: "foo", Project: strings.ToLower(testProject), Service: "service2",
+ State: containerType.StateExited,
+ Health: "",
+ ExitCode: 130,
+ Publishers: []compose.PortPublisher{},
+ Labels: map[string]string{
+ compose.ProjectLabel: strings.ToLower(testProject),
+ compose.ConfigFilesLabel: "/src/pkg/compose/testdata/compose.yaml",
+ compose.WorkingDirLabel: "/src/pkg/compose/testdata",
+ compose.ServiceLabel: "service2",
+ },
+ },
}
assert.NilError(t, err)
assert.DeepEqual(t, containers, expected)
}
-func containerDetails(service string, id string, status string, health string, exitCode int) (moby.Container, moby.ContainerJSON) {
- container := moby.Container{
+func containerDetails(service string, id string, status containerType.ContainerState, health containerType.HealthStatus, exitCode int) (containerType.Summary, containerType.InspectResponse) {
+ ctr := containerType.Summary{
ID: id,
Names: []string{"/" + id},
+ Image: "foo",
Labels: containerLabels(service, false),
State: status,
}
- inspect := moby.ContainerJSON{ContainerJSONBase: &moby.ContainerJSONBase{State: &moby.ContainerState{Status: status, Health: &moby.Health{Status: health}, ExitCode: exitCode}}}
- return container, inspect
+ inspect := containerType.InspectResponse{
+ ContainerJSONBase: &containerType.ContainerJSONBase{
+ State: &containerType.State{
+ Status: status,
+ Health: &containerType.Health{Status: health},
+ ExitCode: exitCode,
+ },
+ },
+ }
+ return ctr, inspect
}
diff --git a/pkg/compose/publish.go b/pkg/compose/publish.go
new file mode 100644
index 00000000000..0163e5286cf
--- /dev/null
+++ b/pkg/compose/publish.go
@@ -0,0 +1,533 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/DefangLabs/secret-detector/pkg/scanner"
+ "github.com/DefangLabs/secret-detector/pkg/secrets"
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/distribution/reference"
+ "github.com/docker/compose/v5/internal/oci"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/compose/transform"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+)
+
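+// Publish pushes the project images and the Compose files as an OCI artifact to the given repository.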
+func (s *composeService) Publish(ctx context.Context, project *types.Project, repository string, options api.PublishOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.publish(ctx, project, repository, options)
+ }, "publish", s.events)
+}
+
+//nolint:gocyclo
+func (s *composeService) publish(ctx context.Context, project *types.Project, repository string, options api.PublishOptions) error {
+ project, err := project.WithProfiles([]string{"*"})
+ if err != nil {
+ return err
+ }
+ accept, err := s.preChecks(project, options)
+ if err != nil {
+ return err
+ }
+ if !accept {
+ return nil
+ }
+ err = s.Push(ctx, project, api.PushOptions{IgnoreFailures: true, ImageMandatory: true})
+ if err != nil {
+ return err
+ }
+
+ layers, err := s.createLayers(ctx, project, options)
+ if err != nil {
+ return err
+ }
+
+ s.events.On(api.Resource{
+ ID: repository,
+ Text: "publishing",
+ Status: api.Working,
+ })
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ logrus.Debug("publishing layers")
+ for _, layer := range layers {
+ indent, _ := json.MarshalIndent(layer, "", " ")
+ fmt.Println(string(indent))
+ }
+ }
+ if !s.dryRun {
+ named, err := reference.ParseDockerRef(repository)
+ if err != nil {
+ return err
+ }
+
+ var insecureRegistries []string
+ if options.InsecureRegistry {
+ insecureRegistries = append(insecureRegistries, reference.Domain(named))
+ }
+
+ resolver := oci.NewResolver(s.configFile(), insecureRegistries...)
+
+ descriptor, err := oci.PushManifest(ctx, resolver, named, layers, options.OCIVersion)
+ if err != nil {
+ s.events.On(api.Resource{
+ ID: repository,
+ Text: "publishing",
+ Status: api.Error,
+ })
+ return err
+ }
+
+ if options.Application {
+ manifests := []v1.Descriptor{}
+ for _, service := range project.Services {
+ ref, err := reference.ParseDockerRef(service.Image)
+ if err != nil {
+ return err
+ }
+
+ manifest, err := oci.Copy(ctx, resolver, ref, named)
+ if err != nil {
+ return err
+ }
+ manifests = append(manifests, manifest)
+ }
+
+ descriptor.Data = nil
+ index, err := json.Marshal(v1.Index{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ MediaType: v1.MediaTypeImageIndex,
+ Manifests: manifests,
+ Subject: &descriptor,
+ Annotations: map[string]string{
+ "com.docker.compose.version": api.ComposeVersion,
+ },
+ })
+ if err != nil {
+ return err
+ }
+ imagesDescriptor := v1.Descriptor{
+ MediaType: v1.MediaTypeImageIndex,
+ ArtifactType: oci.ComposeProjectArtifactType,
+ Digest: digest.FromString(string(index)),
+ Size: int64(len(index)),
+ Annotations: map[string]string{
+ "com.docker.compose.version": api.ComposeVersion,
+ },
+ Data: index,
+ }
+ err = oci.Push(ctx, resolver, reference.TrimNamed(named), imagesDescriptor)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ s.events.On(api.Resource{
+ ID: repository,
+ Text: "published",
+ Status: api.Done,
+ })
+ return nil
+}
+
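+// createLayers produces one OCI layer descriptor per Compose file, plus layers for
+// extends files, env files (when requested) and the image-digests override.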
+func (s *composeService) createLayers(ctx context.Context, project *types.Project, options api.PublishOptions) ([]v1.Descriptor, error) {
+ var layers []v1.Descriptor
+ extFiles := map[string]string{}
+ envFiles := map[string]string{}
+ for _, file := range project.ComposeFiles {
+ data, err := processFile(ctx, file, project, extFiles, envFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ layerDescriptor := oci.DescriptorForComposeFile(file, data)
+ layers = append(layers, layerDescriptor)
+ }
+
+ extLayers, err := processExtends(ctx, project, extFiles)
+ if err != nil {
+ return nil, err
+ }
+ layers = append(layers, extLayers...)
+
+ if options.WithEnvironment {
+ layers = append(layers, envFileLayers(envFiles)...)
+ }
+
+ if options.ResolveImageDigests {
+ yaml, err := s.generateImageDigestsOverride(ctx, project)
+ if err != nil {
+ return nil, err
+ }
+
+ layerDescriptor := oci.DescriptorForComposeFile("image-digests.yaml", yaml)
+ layers = append(layers, layerDescriptor)
+ }
+ return layers, nil
+}
+
+func processExtends(ctx context.Context, project *types.Project, extFiles map[string]string) ([]v1.Descriptor, error) {
+ var layers []v1.Descriptor
+ moreExtFiles := map[string]string{}
+ for xf, hash := range extFiles {
+ data, err := processFile(ctx, xf, project, moreExtFiles, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ layerDescriptor := oci.DescriptorForComposeFile(hash, data)
+ layerDescriptor.Annotations["com.docker.compose.extends"] = "true"
+ layers = append(layers, layerDescriptor)
+ }
+ for f, hash := range moreExtFiles {
+ if _, ok := extFiles[f]; ok {
+ delete(moreExtFiles, f)
+ }
+ extFiles[f] = hash
+ }
+ if len(moreExtFiles) > 0 {
+ extLayers, err := processExtends(ctx, project, moreExtFiles)
+ if err != nil {
+ return nil, err
+ }
+ layers = append(layers, extLayers...)
+ }
+ return layers, nil
+}
+
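+// processFile loads a single Compose file and rewrites env_file and extends references
+// to content-addressed names, recording the referenced files in extFiles and envFiles.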
+func processFile(ctx context.Context, file string, project *types.Project, extFiles map[string]string, envFiles map[string]string) ([]byte, error) {
+ f, err := os.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+
+ base, err := loader.LoadWithContext(ctx, types.ConfigDetails{
+ WorkingDir: project.WorkingDir,
+ Environment: project.Environment,
+ ConfigFiles: []types.ConfigFile{
+ {
+ Filename: file,
+ Content: f,
+ },
+ },
+ }, func(options *loader.Options) {
+ options.SkipValidation = true
+ options.SkipExtends = true
+ options.SkipConsistencyCheck = true
+ options.ResolvePaths = true
+ options.Profiles = project.Profiles
+ })
+ if err != nil {
+ return nil, err
+ }
+ for name, service := range base.Services {
+ for i, envFile := range service.EnvFiles {
+ hash := fmt.Sprintf("%x.env", sha256.Sum256([]byte(envFile.Path)))
+ envFiles[envFile.Path] = hash
+ f, err = transform.ReplaceEnvFile(f, name, i, hash)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if service.Extends == nil {
+ continue
+ }
+ xf := service.Extends.File
+ if xf == "" {
+ continue
+ }
+ if _, err = os.Stat(service.Extends.File); os.IsNotExist(err) {
+ // No local file, but the project loaded successfully: this is actually a remote resource
+ continue
+ }
+
+ hash := fmt.Sprintf("%x.yaml", sha256.Sum256([]byte(xf)))
+ extFiles[xf] = hash
+
+ f, err = transform.ReplaceExtendsFile(f, name, hash)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return f, nil
+}
+
+func (s *composeService) generateImageDigestsOverride(ctx context.Context, project *types.Project) ([]byte, error) {
+ project, err := project.WithImagesResolved(ImageDigestResolver(ctx, s.configFile(), s.apiClient()))
+ if err != nil {
+ return nil, err
+ }
+ override := types.Project{
+ Services: types.Services{},
+ }
+ for name, service := range project.Services {
+ override.Services[name] = types.ServiceConfig{
+ Image: service.Image,
+ }
+ }
+ return override.MarshalYAML()
+}
+
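+// preChecks warns the user about bind mounts, detected secrets and environment variables
+// before publishing, and returns false if the user declines.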
+//nolint:gocyclo
+func (s *composeService) preChecks(project *types.Project, options api.PublishOptions) (bool, error) {
+ if ok, err := s.checkOnlyBuildSection(project); !ok || err != nil {
+ return false, err
+ }
+ bindMounts := s.checkForBindMount(project)
+ if len(bindMounts) > 0 {
+ b := strings.Builder{}
+ b.WriteString("you are about to publish bind mounts declaration within your OCI artifact.\n" +
+ "only the bind mount declarations will be added to the OCI artifact (not content)\n" +
+ "please double check that you are not mounting potential user's sensitive directories or data\n")
+ for key, val := range bindMounts {
+ b.WriteString(key)
+ for _, v := range val {
+ b.WriteString(v.String())
+ b.WriteRune('\n')
+ }
+ }
+ b.WriteString("Are you ok to publish these bind mount declarations?")
+ confirm, err := s.prompt(b.String(), false)
+ if err != nil || !confirm {
+ return false, err
+ }
+ }
+ detectedSecrets, err := s.checkForSensitiveData(project)
+ if err != nil {
+ return false, err
+ }
+ if len(detectedSecrets) > 0 {
+ b := strings.Builder{}
+ b.WriteString("you are about to publish sensitive data within your OCI artifact.\n" +
+ "please double check that you are not leaking sensitive data\n")
+ for _, val := range detectedSecrets {
+ b.WriteString(val.Type)
+ b.WriteRune('\n')
+ b.WriteString(fmt.Sprintf("%q: %s\n", val.Key, val.Value))
+ }
+ b.WriteString("Are you ok to publish these sensitive data?")
+ confirm, err := s.prompt(b.String(), false)
+ if err != nil || !confirm {
+ return false, err
+ }
+ }
+ envVariables, err := s.checkEnvironmentVariables(project, options)
+ if err != nil {
+ return false, err
+ }
+ if len(envVariables) > 0 {
+ b := strings.Builder{}
+ b.WriteString("you are about to publish environment variables within your OCI artifact.\n" +
+ "please double check that you are not leaking sensitive data\n")
+ for key, val := range envVariables {
+ b.WriteString("Service/Config ")
+ b.WriteString(key)
+ b.WriteRune('\n')
+ for k, v := range val {
+ b.WriteString(fmt.Sprintf("%s=%v\n", k, *v))
+ }
+ }
+ b.WriteString("Are you ok to publish these environment variables?")
+ confirm, err := s.prompt(b.String(), false)
+ if err != nil || !confirm {
+ return false, err
+ }
+ }
+ return true, nil
+}
+
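+// checkEnvironmentVariables collects environment declarations from services and configs;
+// unless WithEnvironment is set, any such declaration results in an error.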
+func (s *composeService) checkEnvironmentVariables(project *types.Project, options api.PublishOptions) (map[string]types.MappingWithEquals, error) {
+ envVarList := map[string]types.MappingWithEquals{}
+ errorList := map[string][]string{}
+
+ for _, service := range project.Services {
+ if len(service.EnvFiles) > 0 {
+ errorList[service.Name] = append(errorList[service.Name], fmt.Sprintf("service %q has env_file declared.", service.Name))
+ }
+ if len(service.Environment) > 0 {
+ errorList[service.Name] = append(errorList[service.Name], fmt.Sprintf("service %q has environment variable(s) declared.", service.Name))
+ envVarList[service.Name] = service.Environment
+ }
+ }
+
+ for _, config := range project.Configs {
+ if config.Environment != "" {
+ errorList[config.Name] = append(errorList[config.Name], fmt.Sprintf("config %q is declared as an environment variable.", config.Name))
+ envVarList[config.Name] = types.NewMappingWithEquals([]string{fmt.Sprintf("%s=%s", config.Name, config.Environment)})
+ }
+ }
+
+ if !options.WithEnvironment && len(errorList) > 0 {
+ errorMsgSuffix := "To avoid leaking sensitive data, you must either explicitly allow the sending of environment variables by using the --with-env flag,\n" +
+ "or remove sensitive data from your Compose configuration"
+ errorMsg := ""
+ for _, errors := range errorList {
+ for _, err := range errors {
+ errorMsg += fmt.Sprintf("%s\n", err)
+ }
+ }
+ return nil, fmt.Errorf("%s%s", errorMsg, errorMsgSuffix)
+ }
+ return envVarList, nil
+}
+
+func envFileLayers(files map[string]string) []v1.Descriptor {
+ var layers []v1.Descriptor
+ for file, hash := range files {
+ f, err := os.ReadFile(file)
+ if err != nil {
+ // if we can't read the file, skip to the next one
+ continue
+ }
+ layerDescriptor := oci.DescriptorForEnvFile(hash, f)
+ layers = append(layers, layerDescriptor)
+ }
+ return layers
+}
+
+func (s *composeService) checkOnlyBuildSection(project *types.Project) (bool, error) {
+ errorList := []string{}
+ for _, service := range project.Services {
+ if service.Image == "" && service.Build != nil {
+ errorList = append(errorList, service.Name)
+ }
+ }
+ if len(errorList) > 0 {
+ errMsg := "your Compose stack cannot be published as it only contains a build section for service(s):\n"
+ for _, serviceInError := range errorList {
+ errMsg += fmt.Sprintf("- %q\n", serviceInError)
+ }
+ return false, errors.New(errMsg)
+ }
+ return true, nil
+}
+
+func (s *composeService) checkForBindMount(project *types.Project) map[string][]types.ServiceVolumeConfig {
+ allFindings := map[string][]types.ServiceVolumeConfig{}
+ for serviceName, config := range project.Services {
+ bindMounts := []types.ServiceVolumeConfig{}
+ for _, volume := range config.Volumes {
+ if volume.Type == types.VolumeTypeBind {
+ bindMounts = append(bindMounts, volume)
+ }
+ }
+ if len(bindMounts) > 0 {
+ allFindings[serviceName] = bindMounts
+ }
+ }
+ return allFindings
+}
+
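+// checkForSensitiveData scans Compose files, env files, configs and secrets for values
+// that look like credentials.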
+func (s *composeService) checkForSensitiveData(project *types.Project) ([]secrets.DetectedSecret, error) {
+ var allFindings []secrets.DetectedSecret
+ scan := scanner.NewDefaultScanner()
+ // Check all compose files
+ for _, file := range project.ComposeFiles {
+ in, err := composeFileAsByteReader(file, project)
+ if err != nil {
+ return nil, err
+ }
+
+ findings, err := scan.ScanReader(in)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan compose file %s: %w", file, err)
+ }
+ allFindings = append(allFindings, findings...)
+ }
+ for _, service := range project.Services {
+ // Check env files
+ for _, envFile := range service.EnvFiles {
+ findings, err := scan.ScanFile(envFile.Path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan env file %s: %w", envFile.Path, err)
+ }
+ allFindings = append(allFindings, findings...)
+ }
+ }
+
+ // Check configs defined by files
+ for _, config := range project.Configs {
+ if config.File != "" {
+ findings, err := scan.ScanFile(config.File)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan config file %s: %w", config.File, err)
+ }
+ allFindings = append(allFindings, findings...)
+ }
+ }
+
+ // Check secrets defined by files
+ for _, secret := range project.Secrets {
+ if secret.File != "" {
+ findings, err := scan.ScanFile(secret.File)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan secret file %s: %w", secret.File, err)
+ }
+ allFindings = append(allFindings, findings...)
+ }
+ }
+
+ return allFindings, nil
+}
+
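+// composeFileAsByteReader loads a Compose file without interpolation and returns its
+// canonical YAML form as a reader, ready to be scanned for secrets.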
+func composeFileAsByteReader(filePath string, project *types.Project) (io.Reader, error) {
+ composeFile, err := os.ReadFile(filePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open compose file %s: %w", filePath, err)
+ }
+ base, err := loader.LoadWithContext(context.TODO(), types.ConfigDetails{
+ WorkingDir: project.WorkingDir,
+ Environment: project.Environment,
+ ConfigFiles: []types.ConfigFile{
+ {
+ Filename: filePath,
+ Content: composeFile,
+ },
+ },
+ }, func(options *loader.Options) {
+ options.SkipValidation = true
+ options.SkipExtends = true
+ options.SkipConsistencyCheck = true
+ options.ResolvePaths = true
+ options.SkipInterpolation = true
+ options.SkipResolveEnvironment = true
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ in, err := base.MarshalYAML()
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewBuffer(in), nil
+}
diff --git a/pkg/compose/publish_test.go b/pkg/compose/publish_test.go
new file mode 100644
index 00000000000..03170a08f91
--- /dev/null
+++ b/pkg/compose/publish_test.go
@@ -0,0 +1,102 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "slices"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/internal"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/google/go-cmp/cmp"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "gotest.tools/v3/assert"
+)
+
+func Test_createLayers(t *testing.T) {
+ project, err := loader.LoadWithContext(context.TODO(), types.ConfigDetails{
+ WorkingDir: "testdata/publish/",
+ Environment: types.Mapping{},
+ ConfigFiles: []types.ConfigFile{
+ {
+ Filename: "testdata/publish/compose.yaml",
+ },
+ },
+ })
+ assert.NilError(t, err)
+ project.ComposeFiles = []string{"testdata/publish/compose.yaml"}
+
+ service := &composeService{}
+ layers, err := service.createLayers(context.TODO(), project, api.PublishOptions{
+ WithEnvironment: true,
+ })
+ assert.NilError(t, err)
+
+ published := string(layers[0].Data)
+ assert.Equal(t, published, `name: test
+services:
+ test:
+ extends:
+ file: f8f9ede3d201ec37d5a5e3a77bbadab79af26035e53135e19571f50d541d390c.yaml
+ service: foo
+
+ string:
+ image: test
+ env_file: 5efca9cdbac9f5394c6c2e2094b1b42661f988f57fcab165a0bf72b205451af3.env
+
+ list:
+ image: test
+ env_file:
+ - 5efca9cdbac9f5394c6c2e2094b1b42661f988f57fcab165a0bf72b205451af3.env
+
+ mapping:
+ image: test
+ env_file:
+ - path: 5efca9cdbac9f5394c6c2e2094b1b42661f988f57fcab165a0bf72b205451af3.env
+`)
+
+ expectedLayers := []v1.Descriptor{
+ {
+ MediaType: "application/vnd.docker.compose.file+yaml",
+ Annotations: map[string]string{
+ "com.docker.compose.file": "compose.yaml",
+ "com.docker.compose.version": internal.Version,
+ },
+ },
+ {
+ MediaType: "application/vnd.docker.compose.file+yaml",
+ Annotations: map[string]string{
+ "com.docker.compose.extends": "true",
+ "com.docker.compose.file": "f8f9ede3d201ec37d5a5e3a77bbadab79af26035e53135e19571f50d541d390c",
+ "com.docker.compose.version": internal.Version,
+ },
+ },
+ {
+ MediaType: "application/vnd.docker.compose.envfile",
+ Annotations: map[string]string{
+ "com.docker.compose.envfile": "5efca9cdbac9f5394c6c2e2094b1b42661f988f57fcab165a0bf72b205451af3",
+ "com.docker.compose.version": internal.Version,
+ },
+ },
+ }
+ assert.DeepEqual(t, expectedLayers, layers, cmp.FilterPath(func(path cmp.Path) bool {
+ return !slices.Contains([]string{".Data", ".Digest", ".Size"}, path.String())
+ }, cmp.Ignore()))
+}
diff --git a/pkg/compose/pull.go b/pkg/compose/pull.go
index 1517b071f27..194500c3134 100644
--- a/pkg/compose/pull.go
+++ b/pkg/compose/pull.go
@@ -21,224 +21,426 @@ import (
"encoding/base64"
"encoding/json"
"errors"
+ "fmt"
"io"
"strings"
+ "sync"
+ "time"
- "github.com/compose-spec/compose-go/types"
- "github.com/distribution/distribution/v3/reference"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/distribution/reference"
"github.com/docker/buildx/driver"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/cli/cli/config/configfile"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
- "github.com/docker/docker/registry"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/internal/registry"
+ "github.com/docker/compose/v5/pkg/api"
)
-func (s *composeService) Pull(ctx context.Context, project *types.Project, opts api.PullOptions) error {
- if opts.Quiet {
- return s.pull(ctx, project, opts)
- }
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.pull(ctx, project, opts)
- })
+func (s *composeService) Pull(ctx context.Context, project *types.Project, options api.PullOptions) error {
+ return Run(ctx, func(ctx context.Context) error {
+ return s.pull(ctx, project, options)
+ }, "pull", s.events)
}
-func (s *composeService) pull(ctx context.Context, project *types.Project, opts api.PullOptions) error {
- info, err := s.apiClient().Info(ctx)
+func (s *composeService) pull(ctx context.Context, project *types.Project, opts api.PullOptions) error { //nolint:gocyclo
+ images, err := s.getLocalImagesDigests(ctx, project)
if err != nil {
return err
}
- if info.IndexServerAddress == "" {
- info.IndexServerAddress = registry.IndexServer
- }
-
- w := progress.ContextWriter(ctx)
eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(s.maxConcurrency)
+
+ var (
+ mustBuild []string
+ pullErrors = make([]error, len(project.Services))
+ imagesBeingPulled = map[string]string{}
+ )
- var mustBuild []string
- for _, service := range project.Services {
- service := service
+ i := 0
+ for name, service := range project.Services {
if service.Image == "" {
- w.Event(progress.Event{
- ID: service.Name,
- Status: progress.Done,
+ s.events.On(api.Resource{
+ ID: name,
+ Status: api.Done,
+ Text: "Skipped",
+ Details: "No image to be pulled",
+ })
+ continue
+ }
+
+ switch service.PullPolicy {
+ case types.PullPolicyNever, types.PullPolicyBuild:
+ s.events.On(api.Resource{
+ ID: "Image " + service.Image,
+ Status: api.Done,
Text: "Skipped",
})
continue
+ case types.PullPolicyMissing, types.PullPolicyIfNotPresent:
+ if imageAlreadyPresent(service.Image, images) {
+ s.events.On(api.Resource{
+ ID: "Image " + service.Image,
+ Status: api.Done,
+ Text: "Skipped",
+ Details: "Image is already present locally",
+ })
+ continue
+ }
}
+
+ if service.Build != nil && opts.IgnoreBuildable {
+ s.events.On(api.Resource{
+ ID: "Image " + service.Image,
+ Status: api.Done,
+ Text: "Skipped",
+ Details: "Image can be built",
+ })
+ continue
+ }
+
+ if _, ok := imagesBeingPulled[service.Image]; ok {
+ continue
+ }
+
+ imagesBeingPulled[service.Image] = service.Name
+
+ idx := i
eg.Go(func() error {
- err := s.pullServiceImage(ctx, service, info, s.configFile(), w, false)
+ _, err := s.pullServiceImage(ctx, service, opts.Quiet, project.Environment["DOCKER_DEFAULT_PLATFORM"])
if err != nil {
- if !opts.IgnoreFailures {
- if service.Build != nil {
- mustBuild = append(mustBuild, service.Name)
+ pullErrors[idx] = err
+ if service.Build != nil {
+ mustBuild = append(mustBuild, service.Name)
+ }
+ if !opts.IgnoreFailures && service.Build == nil {
+ if s.dryRun {
+ s.events.On(errorEventf("Image "+service.Image,
+ "error pulling image: %s", service.Image))
}
+ // fail fast if the image can't be pulled or built
return err
}
- w.TailMsgf("Pulling %s: %s", service.Name, err.Error())
}
return nil
})
+ i++
}
err = eg.Wait()
- if !opts.IgnoreFailures && len(mustBuild) > 0 {
- w.TailMsgf("WARNING: Some service image(s) must be built from source by running:\n docker compose build %s", strings.Join(mustBuild, " "))
+ if len(mustBuild) > 0 {
+ logrus.Warnf("WARNING: Some service image(s) must be built from source by running:\n docker compose build %s", strings.Join(mustBuild, " "))
}
- return err
-}
-
-func (s *composeService) pullServiceImage(ctx context.Context, service types.ServiceConfig, info moby.Info, configFile driver.Auth, w progress.Writer, quietPull bool) error {
- w.Event(progress.Event{
- ID: service.Name,
- Status: progress.Working,
- Text: "Pulling",
- })
- ref, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
return err
}
+ if opts.IgnoreFailures {
+ return nil
+ }
+ return errors.Join(pullErrors...)
+}
- repoInfo, err := registry.ParseRepositoryInfo(ref)
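+// imageAlreadyPresent reports whether the service image is available locally; an image
+// tagged "latest" is never considered present, so it gets refreshed.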
+func imageAlreadyPresent(serviceImage string, localImages map[string]api.ImageSummary) bool {
+ normalizedImage, err := reference.ParseDockerRef(serviceImage)
if err != nil {
- return err
+ return false
}
+ switch refType := normalizedImage.(type) {
+ case reference.NamedTagged:
+ _, ok := localImages[serviceImage]
+ return ok && refType.Tag() != "latest"
+ default:
+ _, ok := localImages[serviceImage]
+ return ok
+ }
+}
- key := repoInfo.Index.Name
- if repoInfo.Index.Official {
- key = info.IndexServerAddress
+func getUnwrappedErrorMessage(err error) string {
+ derr := errors.Unwrap(err)
+ if derr != nil {
+ return getUnwrappedErrorMessage(derr)
}
+ return err.Error()
+}
- authConfig, err := configFile.GetAuthConfig(key)
+func (s *composeService) pullServiceImage(ctx context.Context, service types.ServiceConfig, quietPull bool, defaultPlatform string) (string, error) {
+ resource := "Image " + service.Image
+ s.events.On(pullingEvent(service.Image))
+ ref, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
- return err
+ return "", err
}
- buf, err := json.Marshal(authConfig)
+ encodedAuth, err := encodedAuth(ref, s.configFile())
if err != nil {
- return err
+ return "", err
}
- stream, err := s.apiClient().ImagePull(ctx, service.Image, moby.ImagePullOptions{
- RegistryAuth: base64.URLEncoding.EncodeToString(buf),
- Platform: service.Platform,
+ platform := service.Platform
+ if platform == "" {
+ platform = defaultPlatform
+ }
+
+ stream, err := s.apiClient().ImagePull(ctx, service.Image, image.PullOptions{
+ RegistryAuth: encodedAuth,
+ Platform: platform,
})
- if err != nil {
- w.Event(progress.Event{
- ID: service.Name,
- Status: progress.Error,
- Text: "Error",
+
+ if ctx.Err() != nil {
+ s.events.On(api.Resource{
+ ID: resource,
+ Status: api.Warning,
+ Text: "Interrupted",
+ })
+ return "", nil
+ }
+
+ // if the pull failed but the service has a build section,
+ // the status should be a warning instead of an error
+ if err != nil && service.Build != nil {
+ s.events.On(api.Resource{
+ ID: resource,
+ Status: api.Warning,
+ Text: getUnwrappedErrorMessage(err),
})
- return WrapCategorisedComposeError(err, PullFailure)
+ return "", err
+ }
+
+ if err != nil {
+ s.events.On(errorEvent(resource, getUnwrappedErrorMessage(err)))
+ return "", err
}
dec := json.NewDecoder(stream)
for {
var jm jsonmessage.JSONMessage
if err := dec.Decode(&jm); err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
- return WrapCategorisedComposeError(err, PullFailure)
+ return "", err
}
if jm.Error != nil {
- return WrapCategorisedComposeError(errors.New(jm.Error.Message), PullFailure)
+ return "", errors.New(jm.Error.Message)
}
if !quietPull {
- toPullProgressEvent(service.Name, jm, w)
+ toPullProgressEvent(resource, jm, s.events)
}
}
- w.Event(progress.Event{
- ID: service.Name,
- Status: progress.Done,
- Text: "Pulled",
- })
- return nil
+ s.events.On(pulledEvent(service.Image))
+
+ inspected, err := s.apiClient().ImageInspect(ctx, service.Image)
+ if err != nil {
+ return "", err
+ }
+ return inspected.ID, nil
}
-func (s *composeService) pullRequiredImages(ctx context.Context, project *types.Project, images map[string]string, quietPull bool) error {
- info, err := s.apiClient().Info(ctx)
+// ImageDigestResolver creates a func able to resolve an image digest from a docker ref.
+func ImageDigestResolver(ctx context.Context, file *configfile.ConfigFile, apiClient client.APIClient) func(named reference.Named) (digest.Digest, error) {
+ return func(named reference.Named) (digest.Digest, error) {
+ auth, err := encodedAuth(named, file)
+ if err != nil {
+ return "", err
+ }
+ inspect, err := apiClient.DistributionInspect(ctx, named.String(), auth)
+ if err != nil {
+ return "",
+ fmt.Errorf("failed to resolve digest for %s: %w", named.String(), err)
+ }
+ return inspect.Descriptor.Digest, nil
+ }
+}
+
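+// encodedAuth resolves the registry auth config for the given reference and returns it
+// base64-encoded, as expected by the image pull API.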
+func encodedAuth(ref reference.Named, configFile driver.Auth) (string, error) {
+ authConfig, err := configFile.GetAuthConfig(registry.GetAuthConfigKey(reference.Domain(ref)))
if err != nil {
- return err
+ return "", err
}
- if info.IndexServerAddress == "" {
- info.IndexServerAddress = registry.IndexServer
+ buf, err := json.Marshal(authConfig)
+ if err != nil {
+ return "", err
}
+ return base64.URLEncoding.EncodeToString(buf), nil
+}
- var needPull []types.ServiceConfig
- for _, service := range project.Services {
- if service.Image == "" {
- continue
+func (s *composeService) pullRequiredImages(ctx context.Context, project *types.Project, images map[string]api.ImageSummary, quietPull bool) error {
+ needPull := map[string]types.ServiceConfig{}
+ for name, service := range project.Services {
+ pull, err := mustPull(service, images)
+ if err != nil {
+ return err
}
- switch service.PullPolicy {
- case "", types.PullPolicyMissing, types.PullPolicyIfNotPresent:
- if _, ok := images[service.Image]; ok {
- continue
+ if pull {
+ needPull[name] = service
+ }
+ for i, vol := range service.Volumes {
+ if vol.Type == types.VolumeTypeImage {
+ if _, ok := images[vol.Source]; !ok {
+ // Hack: create a fake ServiceConfig so we pull the missing volume image
+ n := fmt.Sprintf("%s:volume %d", name, i)
+ needPull[n] = types.ServiceConfig{
+ Name: n,
+ Image: vol.Source,
+ }
+ }
}
- case types.PullPolicyNever, types.PullPolicyBuild:
- continue
- case types.PullPolicyAlways:
- // force pull
}
- needPull = append(needPull, service)
+
}
if len(needPull) == 0 {
return nil
}
- return progress.Run(ctx, func(ctx context.Context) error {
- w := progress.ContextWriter(ctx)
- eg, ctx := errgroup.WithContext(ctx)
- for _, service := range needPull {
- service := service
- eg.Go(func() error {
- err := s.pullServiceImage(ctx, service, info, s.configFile(), w, quietPull)
- if err != nil && service.Build != nil {
- // image can be built, so we can ignore pull failure
- return nil
- }
- return err
- })
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(s.maxConcurrency)
+ pulledImages := map[string]api.ImageSummary{}
+ var mutex sync.Mutex
+ for name, service := range needPull {
+ eg.Go(func() error {
+ id, err := s.pullServiceImage(ctx, service, quietPull, project.Environment["DOCKER_DEFAULT_PLATFORM"])
+ mutex.Lock()
+ defer mutex.Unlock()
+ pulledImages[name] = api.ImageSummary{
+ ID: id,
+ Repository: service.Image,
+ LastTagTime: time.Now(),
+ }
+ if err != nil && isServiceImageToBuild(service, project.Services) {
+ // image can be built, so we can ignore pull failure
+ return nil
+ }
+ return err
+ })
+ }
+ err := eg.Wait()
+ for i, service := range needPull {
+ if pulledImages[i].ID != "" {
+ images[service.Image] = pulledImages[i]
}
- return eg.Wait()
- })
+ }
+ return err
+}
+
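+// mustPull decides whether a service image has to be pulled, based on its pull policy
+// and the locally available images.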
+func mustPull(service types.ServiceConfig, images map[string]api.ImageSummary) (bool, error) {
+ if service.Provider != nil {
+ return false, nil
+ }
+ if service.Image == "" {
+ return false, nil
+ }
+ policy, duration, err := service.GetPullPolicy()
+ if err != nil {
+ return false, err
+ }
+ switch policy {
+ case types.PullPolicyAlways:
+ // force pull
+ return true, nil
+ case types.PullPolicyNever, types.PullPolicyBuild:
+ return false, nil
+ case types.PullPolicyRefresh:
+ img, ok := images[service.Image]
+ if !ok {
+ return true, nil
+ }
+ return time.Now().After(img.LastTagTime.Add(duration)), nil
+ default: // Pull if missing
+ _, ok := images[service.Image]
+ return !ok, nil
+ }
+}
+
+func isServiceImageToBuild(service types.ServiceConfig, services types.Services) bool {
+ if service.Build != nil {
+ return true
+ }
+
+ if service.Image == "" {
+ // N.B. this should be impossible as service must have either `build` or `image` (or both)
+ return false
+ }
+
+ // look through the other services to see if another has a build definition for the same
+ // image name
+ for _, svc := range services {
+ if svc.Image == service.Image && svc.Build != nil {
+ return true
+ }
+ }
+ return false
}
-func toPullProgressEvent(parent string, jm jsonmessage.JSONMessage, w progress.Writer) {
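+// Pull progress phases as reported by the Docker daemon in JSON messages.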
+const (
+ PreparingPhase = "Preparing"
+ WaitingPhase = "waiting"
+ PullingFsPhase = "Pulling fs layer"
+ DownloadingPhase = "Downloading"
+ DownloadCompletePhase = "Download complete"
+ ExtractingPhase = "Extracting"
+ VerifyingChecksumPhase = "Verifying Checksum"
+ AlreadyExistsPhase = "Already exists"
+ PullCompletePhase = "Pull complete"
+)
+
+func toPullProgressEvent(parent string, jm jsonmessage.JSONMessage, events api.EventProcessor) {
if jm.ID == "" || jm.Progress == nil {
return
}
var (
- text string
- status = progress.Working
+ text string
+ total int64
+ percent int
+ current int64
+ status = api.Working
)
text = jm.Progress.String()
- if jm.Status == "Pull complete" ||
- jm.Status == "Already exists" ||
- strings.Contains(jm.Status, "Image is up to date") ||
+ switch jm.Status {
+ case PreparingPhase, WaitingPhase, PullingFsPhase:
+ percent = 0
+ case DownloadingPhase, ExtractingPhase, VerifyingChecksumPhase:
+ if jm.Progress != nil {
+ current = jm.Progress.Current
+ total = jm.Progress.Total
+ if jm.Progress.Total > 0 {
+ percent = int(jm.Progress.Current * 100 / jm.Progress.Total)
+ }
+ }
+ case DownloadCompletePhase, AlreadyExistsPhase, PullCompletePhase:
+ status = api.Done
+ percent = 100
+ }
+
+ if strings.Contains(jm.Status, "Image is up to date") ||
strings.Contains(jm.Status, "Downloaded newer image") {
- status = progress.Done
+ status = api.Done
+ percent = 100
}
if jm.Error != nil {
- status = progress.Error
+ status = api.Error
text = jm.Error.Message
}
- w.Event(progress.Event{
- ID: jm.ID,
- ParentID: parent,
- Text: jm.Status,
- Status: status,
- StatusText: text,
+ events.On(api.Resource{
+ ID: jm.ID,
+ ParentID: parent,
+ Current: current,
+ Total: total,
+ Percent: percent,
+ Status: status,
+ Text: text,
})
}
diff --git a/pkg/compose/push.go b/pkg/compose/push.go
index d76d13715b7..abf9453530a 100644
--- a/pkg/compose/push.go
+++ b/pkg/compose/push.go
@@ -20,80 +20,78 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
+ "strings"
- "github.com/compose-spec/compose-go/types"
- "github.com/distribution/distribution/v3/reference"
- "github.com/docker/buildx/driver"
- moby "github.com/docker/docker/api/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/distribution/reference"
+ "github.com/docker/docker/api/types/image"
"github.com/docker/docker/pkg/jsonmessage"
- "github.com/docker/docker/registry"
- "github.com/pkg/errors"
"golang.org/x/sync/errgroup"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/internal/registry"
+ "github.com/docker/compose/v5/pkg/api"
)
func (s *composeService) Push(ctx context.Context, project *types.Project, options api.PushOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
+ if options.Quiet {
return s.push(ctx, project, options)
- })
+ }
+ return Run(ctx, func(ctx context.Context) error {
+ return s.push(ctx, project, options)
+ }, "push", s.events)
}
func (s *composeService) push(ctx context.Context, project *types.Project, options api.PushOptions) error {
eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(s.maxConcurrency)
- info, err := s.apiClient().Info(ctx)
- if err != nil {
- return err
- }
- if info.IndexServerAddress == "" {
- info.IndexServerAddress = registry.IndexServer
- }
-
- w := progress.ContextWriter(ctx)
for _, service := range project.Services {
if service.Build == nil || service.Image == "" {
- w.Event(progress.Event{
+ if options.ImageMandatory && service.Image == "" && service.Provider == nil {
+ return fmt.Errorf("%q attribute is mandatory to push an image for service %q", "service.image", service.Name)
+ }
+ s.events.On(api.Resource{
ID: service.Name,
- Status: progress.Done,
+ Status: api.Done,
Text: "Skipped",
})
continue
}
- service := service
- eg.Go(func() error {
- err := s.pushServiceImage(ctx, service, info, s.configFile(), w)
- if err != nil {
- if !options.IgnoreFailures {
- return err
+ tags := []string{service.Image}
+ if service.Build != nil {
+ tags = append(tags, service.Build.Tags...)
+ }
+
+ for _, tag := range tags {
+ eg.Go(func() error {
+ s.events.On(newEvent(tag, api.Working, "Pushing"))
+ err := s.pushServiceImage(ctx, tag, options.Quiet)
+ if err != nil {
+ if !options.IgnoreFailures {
+ s.events.On(newEvent(tag, api.Error, err.Error()))
+ return err
+ }
+ s.events.On(newEvent(tag, api.Warning, err.Error()))
+ } else {
+ s.events.On(newEvent(tag, api.Done, "Pushed"))
}
- w.TailMsgf("Pushing %s: %s", service.Name, err.Error())
- }
- return nil
- })
+ return nil
+ })
+ }
}
return eg.Wait()
}
-func (s *composeService) pushServiceImage(ctx context.Context, service types.ServiceConfig, info moby.Info, configFile driver.Auth, w progress.Writer) error {
- ref, err := reference.ParseNormalizedNamed(service.Image)
+func (s *composeService) pushServiceImage(ctx context.Context, tag string, quietPush bool) error {
+ ref, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return err
}
- repoInfo, err := registry.ParseRepositoryInfo(ref)
- if err != nil {
- return err
- }
-
- key := repoInfo.Index.Name
- if repoInfo.Index.Official {
- key = info.IndexServerAddress
- }
- authConfig, err := configFile.GetAuthConfig(key)
+ authConfig, err := s.configFile().GetAuthConfig(registry.GetAuthConfigKey(reference.Domain(ref)))
if err != nil {
return err
}
@@ -103,7 +101,7 @@ func (s *composeService) pushServiceImage(ctx context.Context, service types.Ser
return err
}
- stream, err := s.apiClient().ImagePush(ctx, service.Image, moby.ImagePushOptions{
+ stream, err := s.apiClient().ImagePush(ctx, tag, image.PushOptions{
RegistryAuth: base64.URLEncoding.EncodeToString(buf),
})
if err != nil {
@@ -113,7 +111,7 @@ func (s *composeService) pushServiceImage(ctx context.Context, service types.Ser
for {
var jm jsonmessage.JSONMessage
if err := dec.Decode(&jm); err != nil {
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
return err
@@ -121,34 +119,63 @@ func (s *composeService) pushServiceImage(ctx context.Context, service types.Ser
if jm.Error != nil {
return errors.New(jm.Error.Message)
}
- toPushProgressEvent(service.Name, jm, w)
+
+ if !quietPush {
+ toPushProgressEvent(tag, jm, s.events)
+ }
}
+
return nil
}
-func toPushProgressEvent(prefix string, jm jsonmessage.JSONMessage, w progress.Writer) {
+func toPushProgressEvent(prefix string, jm jsonmessage.JSONMessage, events api.EventProcessor) {
if jm.ID == "" {
// skipped
return
}
var (
- text string
- status = progress.Working
+ text string
+ status = api.Working
+ total int64
+ current int64
+ percent int
)
- if jm.Status == "Pull complete" || jm.Status == "Already exists" {
- status = progress.Done
+ if isDone(jm) {
+ status = api.Done
+ percent = 100
}
if jm.Error != nil {
- status = progress.Error
+ status = api.Error
text = jm.Error.Message
}
if jm.Progress != nil {
text = jm.Progress.String()
+ if jm.Progress.Total != 0 {
+ current = jm.Progress.Current
+ total = jm.Progress.Total
+ if jm.Progress.Total > 0 {
+ percent = int(jm.Progress.Current * 100 / jm.Progress.Total)
+ }
+ }
}
- w.Event(progress.Event{
- ID: fmt.Sprintf("Pushing %s: %s", prefix, jm.ID),
- Text: jm.Status,
- Status: status,
- StatusText: text,
+
+ events.On(api.Resource{
+ ParentID: prefix,
+ ID: jm.ID,
+ Text: text,
+ Status: status,
+ Current: current,
+ Total: total,
+ Percent: percent,
})
}
+
+func isDone(msg jsonmessage.JSONMessage) bool {
+	// TODO there should be a better way to detect that a push is done than checking the status message
+ switch strings.ToLower(msg.Status) {
+ case "pushed", "layer already exists":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/pkg/compose/remove.go b/pkg/compose/remove.go
index c5dd3dd845b..aee1b60d1ac 100644
--- a/pkg/compose/remove.go
+++ b/pkg/compose/remove.go
@@ -21,42 +21,67 @@ import (
"fmt"
"strings"
- "github.com/docker/compose/v2/pkg/api"
- moby "github.com/docker/docker/api/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
"golang.org/x/sync/errgroup"
-
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/prompt"
)
func (s *composeService) Remove(ctx context.Context, projectName string, options api.RemoveOptions) error {
- containers, _, err := s.actualState(ctx, projectName, options.Services)
+ projectName = strings.ToLower(projectName)
+
+ if options.Stop {
+ err := s.Stop(ctx, projectName, api.StopOptions{
+ Services: options.Services,
+ Project: options.Project,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ containers, err := s.getContainers(ctx, projectName, oneOffExclude, true, options.Services...)
if err != nil {
if api.IsNotFoundError(err) {
- fmt.Fprintln(s.stderr(), "No stopped containers")
+ _, _ = fmt.Fprintln(s.stderr(), "No stopped containers")
return nil
}
return err
}
- stoppedContainers := containers.filter(func(c moby.Container) bool {
- return c.State != ContainerRunning
- })
+ if options.Project != nil {
+ containers = containers.filter(isService(options.Project.ServiceNames()...))
+ }
+
+ var stoppedContainers Containers
+ for _, ctr := range containers {
+		// We have to inspect containers, as the State reported by getContainers suffers from a race condition
+ inspected, err := s.apiClient().ContainerInspect(ctx, ctr.ID)
+ if api.IsNotFoundError(err) {
+ // Already removed. Maybe configured with auto-remove
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ if !inspected.State.Running || (options.Stop && s.dryRun) {
+ stoppedContainers = append(stoppedContainers, ctr)
+ }
+ }
var names []string
- stoppedContainers.forEach(func(c moby.Container) {
+ stoppedContainers.forEach(func(c container.Summary) {
names = append(names, getCanonicalContainerName(c))
})
if len(names) == 0 {
- fmt.Fprintln(s.stderr(), "No stopped containers")
- return nil
+ return api.ErrNoResources
}
+
msg := fmt.Sprintf("Going to remove %s", strings.Join(names, ", "))
if options.Force {
- fmt.Println(msg)
+ _, _ = fmt.Fprintln(s.stdout(), msg)
} else {
- confirm, err := prompt.User{}.Confirm(msg, false)
+ confirm, err := s.prompt(msg, false)
if err != nil {
return err
}
@@ -64,25 +89,23 @@ func (s *composeService) Remove(ctx context.Context, projectName string, options
return nil
}
}
- return progress.Run(ctx, func(ctx context.Context) error {
+ return Run(ctx, func(ctx context.Context) error {
return s.remove(ctx, stoppedContainers, options)
- })
+ }, "remove", s.events)
}
func (s *composeService) remove(ctx context.Context, containers Containers, options api.RemoveOptions) error {
- w := progress.ContextWriter(ctx)
eg, ctx := errgroup.WithContext(ctx)
- for _, container := range containers {
- container := container
+ for _, ctr := range containers {
eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.RemovingEvent(eventName))
- err := s.apiClient().ContainerRemove(ctx, container.ID, moby.ContainerRemoveOptions{
+ eventName := getContainerProgressName(ctr)
+ s.events.On(removingEvent(eventName))
+ err := s.apiClient().ContainerRemove(ctx, ctr.ID, container.RemoveOptions{
RemoveVolumes: options.Volumes,
Force: options.Force,
})
if err == nil {
- w.Event(progress.RemovedEvent(eventName))
+ s.events.On(removedEvent(eventName))
}
return err
})
diff --git a/pkg/compose/restart.go b/pkg/compose/restart.go
index 34b9d6d33fb..6b24586947b 100644
--- a/pkg/compose/restart.go
+++ b/pkg/compose/restart.go
@@ -18,58 +18,96 @@ package compose
import (
"context"
+ "strings"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
+ "github.com/docker/docker/api/types/container"
"golang.org/x/sync/errgroup"
-
- "github.com/docker/compose/v2/pkg/progress"
- "github.com/docker/compose/v2/pkg/utils"
)
func (s *composeService) Restart(ctx context.Context, projectName string, options api.RestartOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.restart(ctx, projectName, options)
- })
+ return Run(ctx, func(ctx context.Context) error {
+ return s.restart(ctx, strings.ToLower(projectName), options)
+ }, "restart", s.events)
}
-func (s *composeService) restart(ctx context.Context, projectName string, options api.RestartOptions) error {
-
- observedState, err := s.getContainers(ctx, projectName, oneOffInclude, true)
+func (s *composeService) restart(ctx context.Context, projectName string, options api.RestartOptions) error { //nolint:gocyclo
+ containers, err := s.getContainers(ctx, projectName, oneOffExclude, true)
if err != nil {
return err
}
- project, err := s.projectFromName(observedState, projectName, options.Services...)
+ project := options.Project
+ if project == nil {
+ project, err = s.getProjectWithResources(ctx, containers, projectName)
+ if err != nil {
+ return err
+ }
+ }
+
+ if options.NoDeps {
+ project, err = project.WithSelectedServices(options.Services, types.IgnoreDependencies)
+ if err != nil {
+ return err
+ }
+ }
+
+	// ignore depends_on relations which are not impacted by restarting a service or are not required
+ project, err = project.WithServicesTransform(func(_ string, s types.ServiceConfig) (types.ServiceConfig, error) {
+ for name, r := range s.DependsOn {
+ if !r.Restart {
+ delete(s.DependsOn, name)
+ }
+ }
+ return s, nil
+ })
if err != nil {
return err
}
- if len(options.Services) == 0 {
- options.Services = project.ServiceNames()
+ if len(options.Services) != 0 {
+ project, err = project.WithSelectedServices(options.Services, types.IncludeDependents)
+ if err != nil {
+ return err
+ }
}
- w := progress.ContextWriter(ctx)
- err = InDependencyOrder(ctx, project, func(c context.Context, service string) error {
- if !utils.StringContains(options.Services, service) {
- return nil
+ return InDependencyOrder(ctx, project, func(c context.Context, service string) error {
+ config := project.Services[service]
+ err = s.waitDependencies(ctx, project, service, config.DependsOn, containers, 0)
+ if err != nil {
+ return err
}
+
eg, ctx := errgroup.WithContext(ctx)
- for _, container := range observedState.filter(isService(service)) {
- container := container
+ for _, ctr := range containers.filter(isService(service)) {
eg.Go(func() error {
- eventName := getContainerProgressName(container)
- w.Event(progress.RestartingEvent(eventName))
- err := s.apiClient().ContainerRestart(ctx, container.ID, options.Timeout)
- if err == nil {
- w.Event(progress.StartedEvent(eventName))
+ def := project.Services[service]
+ for _, hook := range def.PreStop {
+ err = s.runHook(ctx, ctr, def, hook, nil)
+ if err != nil {
+ return err
+ }
+ }
+ eventName := getContainerProgressName(ctr)
+ s.events.On(restartingEvent(eventName))
+ timeout := utils.DurationSecondToInt(options.Timeout)
+ err = s.apiClient().ContainerRestart(ctx, ctr.ID, container.StopOptions{Timeout: timeout})
+ if err != nil {
+ return err
}
- return err
+ s.events.On(startedEvent(eventName))
+ for _, hook := range def.PostStart {
+ err = s.runHook(ctx, ctr, def, hook, nil)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
})
}
return eg.Wait()
})
- if err != nil {
- return err
- }
- return nil
}
diff --git a/pkg/compose/run.go b/pkg/compose/run.go
index 3c386f3b595..4b4eb53e8b9 100644
--- a/pkg/compose/run.go
+++ b/pkg/compose/run.go
@@ -18,12 +18,16 @@ package compose
import (
"context"
+ "errors"
"fmt"
+ "os"
+ "os/signal"
+ "slices"
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
"github.com/docker/cli/cli"
cmd "github.com/docker/cli/cli/command/container"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
"github.com/docker/docker/pkg/stringid"
)
@@ -33,22 +37,41 @@ func (s *composeService) RunOneOffContainer(ctx context.Context, project *types.
return 0, err
}
- start := cmd.NewStartOptions()
- start.OpenStdin = !opts.Detach && opts.Interactive
- start.Attach = !opts.Detach
- start.Containers = []string{containerID}
+	// remove the cancellable context signal handler so we can forward signals to the container without Compose exiting
+ signal.Reset()
- err = cmd.RunStart(s.dockerCli, &start)
- if sterr, ok := err.(cli.StatusError); ok {
- return sterr.StatusCode, nil
+ sigc := make(chan os.Signal, 128)
+ signal.Notify(sigc)
+ go cmd.ForwardAllSignals(ctx, s.apiClient(), containerID, sigc)
+ defer signal.Stop(sigc)
+
+ err = cmd.RunStart(ctx, s.dockerCli, &cmd.StartOptions{
+ OpenStdin: !opts.Detach && opts.Interactive,
+ Attach: !opts.Detach,
+ Containers: []string{containerID},
+ DetachKeys: s.configFile().DetachKeys,
+ })
+ var stErr cli.StatusError
+ if errors.As(err, &stErr) {
+ return stErr.StatusCode, nil
}
return 0, err
}
func (s *composeService) prepareRun(ctx context.Context, project *types.Project, opts api.RunOptions) (string, error) {
- if err := prepareVolumes(project); err != nil { // all dependencies already checked, but might miss service img
+	// Temporary implementation of use_api_socket until we get actual support inside the Docker Engine
+ project, err := s.useAPISocket(project)
+ if err != nil {
+ return "", err
+ }
+
+ err = Run(ctx, func(ctx context.Context) error {
+ return s.startDependencies(ctx, project, opts)
+ }, "run", s.events)
+ if err != nil {
return "", err
}
+
service, err := project.GetService(opts.Service)
if err != nil {
return "", err
@@ -56,15 +79,16 @@ func (s *composeService) prepareRun(ctx context.Context, project *types.Project,
applyRunOptions(project, &service, opts)
- if err := s.dockerCli.In().CheckTty(opts.Interactive, service.Tty); err != nil {
+ if err := s.stdin().CheckTty(opts.Interactive, service.Tty); err != nil {
return "", err
}
slug := stringid.GenerateRandomID()
if service.ContainerName == "" {
- service.ContainerName = fmt.Sprintf("%s_%s_run_%s", project.Name, service.Name, stringid.TruncateID(slug))
+ service.ContainerName = fmt.Sprintf("%[1]s%[4]s%[2]s%[4]srun%[4]s%[3]s", project.Name, service.Name, stringid.TruncateID(slug), api.Separator)
}
- service.Scale = 1
+ one := 1
+ service.Scale = &one
service.Restart = ""
if service.Deploy != nil {
service.Deploy.RestartPolicy = nil
@@ -73,27 +97,66 @@ func (s *composeService) prepareRun(ctx context.Context, project *types.Project,
Add(api.SlugLabel, slug).
Add(api.OneoffLabel, "True")
- if err := s.ensureImagesExists(ctx, project, opts.QuietPull); err != nil { // all dependencies already checked, but might miss service img
+	// Only ensure the image exists for the target service; dependencies were already handled by startDependencies
+ buildOpts := prepareBuildOptions(opts)
+ if err := s.ensureImagesExists(ctx, project, buildOpts, opts.QuietPull); err != nil { // all dependencies already checked, but might miss service img
return "", err
}
+
+ observedState, err := s.getContainers(ctx, project.Name, oneOffInclude, true)
+ if err != nil {
+ return "", err
+ }
+
if !opts.NoDeps {
- if err := s.waitDependencies(ctx, project, service.DependsOn); err != nil {
+ if err := s.waitDependencies(ctx, project, service.Name, service.DependsOn, observedState, 0); err != nil {
return "", err
}
}
+ createOpts := createOptions{
+ AutoRemove: opts.AutoRemove,
+ AttachStdin: opts.Interactive,
+ UseNetworkAliases: opts.UseNetworkAliases,
+ Labels: mergeLabels(service.Labels, service.CustomLabels),
+ }
- observedState, err := s.getContainers(ctx, project.Name, oneOffInclude, true)
+ err = newConvergence(project.ServiceNames(), observedState, nil, nil, s).resolveServiceReferences(&service)
+ if err != nil {
+ return "", err
+ }
+
+ err = s.ensureModels(ctx, project, opts.QuietPull)
+ if err != nil {
+ return "", err
+ }
+
+ created, err := s.createContainer(ctx, project, service, service.ContainerName, -1, createOpts)
if err != nil {
return "", err
}
- updateServices(&service, observedState)
- created, err := s.createContainer(ctx, project, service, service.ContainerName, 1,
- opts.AutoRemove, opts.UseNetworkAliases, opts.Interactive)
+ ctr, err := s.apiClient().ContainerInspect(ctx, created.ID)
if err != nil {
return "", err
}
- return created.ID, nil
+
+ err = s.injectSecrets(ctx, project, service, ctr.ID)
+ if err != nil {
+ return created.ID, err
+ }
+
+ err = s.injectConfigs(ctx, project, service, ctr.ID)
+ return created.ID, err
+}
+
+func prepareBuildOptions(opts api.RunOptions) *api.BuildOptions {
+ if opts.Build == nil {
+ return nil
+ }
+ // Create a copy of build options and restrict to only the target service
+ buildOptsCopy := *opts.Build
+ buildOptsCopy.Services = []string{opts.Service}
+ return &buildOptsCopy
}
func applyRunOptions(project *types.Project, service *types.ServiceConfig, opts api.RunOptions) {
@@ -104,24 +167,60 @@ func applyRunOptions(project *types.Project, service *types.ServiceConfig, opts
if len(opts.Command) > 0 {
service.Command = opts.Command
}
- if len(opts.User) > 0 {
+ if opts.User != "" {
service.User = opts.User
}
- if len(opts.WorkingDir) > 0 {
+
+ if len(opts.CapAdd) > 0 {
+ service.CapAdd = append(service.CapAdd, opts.CapAdd...)
+ service.CapDrop = slices.DeleteFunc(service.CapDrop, func(e string) bool { return slices.Contains(opts.CapAdd, e) })
+ }
+ if len(opts.CapDrop) > 0 {
+ service.CapDrop = append(service.CapDrop, opts.CapDrop...)
+ service.CapAdd = slices.DeleteFunc(service.CapAdd, func(e string) bool { return slices.Contains(opts.CapDrop, e) })
+ }
+ if opts.WorkingDir != "" {
service.WorkingDir = opts.WorkingDir
}
if opts.Entrypoint != nil {
service.Entrypoint = opts.Entrypoint
+ if len(opts.Command) == 0 {
+ service.Command = []string{}
+ }
}
if len(opts.Environment) > 0 {
- env := types.NewMappingWithEquals(opts.Environment)
- projectEnv := env.Resolve(func(s string) (string, bool) {
- v, ok := project.Environment[s]
+ cmdEnv := types.NewMappingWithEquals(opts.Environment)
+ serviceOverrideEnv := cmdEnv.Resolve(func(s string) (string, bool) {
+ v, ok := envResolver(project.Environment)(s)
return v, ok
}).RemoveEmpty()
- service.Environment.OverrideBy(projectEnv)
+ if service.Environment == nil {
+ service.Environment = types.MappingWithEquals{}
+ }
+ service.Environment.OverrideBy(serviceOverrideEnv)
}
for k, v := range opts.Labels {
service.Labels = service.Labels.Add(k, v)
}
}
+
+func (s *composeService) startDependencies(ctx context.Context, project *types.Project, options api.RunOptions) error {
+ project = project.WithServicesDisabled(options.Service)
+
+ err := s.Create(ctx, project, api.CreateOptions{
+ Build: options.Build,
+ IgnoreOrphans: options.IgnoreOrphans,
+ RemoveOrphans: options.RemoveOrphans,
+ QuietPull: options.QuietPull,
+ })
+ if err != nil {
+ return err
+ }
+
+ if len(project.Services) > 0 {
+ return s.Start(ctx, project.Name, api.StartOptions{
+ Project: project,
+ })
+ }
+ return nil
+}
diff --git a/pkg/compose/scale.go b/pkg/compose/scale.go
new file mode 100644
index 00000000000..5a773b31297
--- /dev/null
+++ b/pkg/compose/scale.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 Docker Compose CLI authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package compose
+
+import (
+ "context"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+func (s *composeService) Scale(ctx context.Context, project *types.Project, options api.ScaleOptions) error {
+ return Run(ctx, tracing.SpanWrapFunc("project/scale", tracing.ProjectOptions(ctx, project), func(ctx context.Context) error {
+ err := s.create(ctx, project, api.CreateOptions{Services: options.Services})
+ if err != nil {
+ return err
+ }
+ return s.start(ctx, project.Name, api.StartOptions{Project: project, Services: options.Services}, nil)
+ }), "scale", s.events)
+}
diff --git a/pkg/compose/secrets.go b/pkg/compose/secrets.go
new file mode 100644
index 00000000000..72bc4b5c8e8
--- /dev/null
+++ b/pkg/compose/secrets.go
@@ -0,0 +1,179 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+type mountType string
+
+const (
+ secretMount mountType = "secret"
+ configMount mountType = "config"
+)
+
+func (s *composeService) injectSecrets(ctx context.Context, project *types.Project, service types.ServiceConfig, id string) error {
+ return s.injectFileReferences(ctx, project, service, id, secretMount)
+}
+
+func (s *composeService) injectConfigs(ctx context.Context, project *types.Project, service types.ServiceConfig, id string) error {
+ return s.injectFileReferences(ctx, project, service, id, configMount)
+}
+
+func (s *composeService) injectFileReferences(ctx context.Context, project *types.Project, service types.ServiceConfig, id string, mountType mountType) error {
+ mounts, sources := s.getFilesAndMap(project, service, mountType)
+
+ for _, mount := range mounts {
+ content, err := s.resolveFileContent(project, sources[mount.Source], mountType)
+ if err != nil {
+ return err
+ }
+ if content == "" {
+ continue
+ }
+
+ if service.ReadOnly {
+ return fmt.Errorf("cannot create %s %q in read-only service %s: `file` is the sole supported option", mountType, sources[mount.Source].Name, service.Name)
+ }
+
+ s.setDefaultTarget(&mount, mountType)
+
+ if err := s.copyFileToContainer(ctx, id, content, mount); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *composeService) getFilesAndMap(project *types.Project, service types.ServiceConfig, mountType mountType) ([]types.FileReferenceConfig, map[string]types.FileObjectConfig) {
+ var files []types.FileReferenceConfig
+ var fileMap map[string]types.FileObjectConfig
+
+ switch mountType {
+ case secretMount:
+ files = make([]types.FileReferenceConfig, len(service.Secrets))
+ for i, config := range service.Secrets {
+ files[i] = types.FileReferenceConfig(config)
+ }
+ fileMap = make(map[string]types.FileObjectConfig)
+ for k, v := range project.Secrets {
+ fileMap[k] = types.FileObjectConfig(v)
+ }
+ case configMount:
+ files = make([]types.FileReferenceConfig, len(service.Configs))
+ for i, config := range service.Configs {
+ files[i] = types.FileReferenceConfig(config)
+ }
+ fileMap = make(map[string]types.FileObjectConfig)
+ for k, v := range project.Configs {
+ fileMap[k] = types.FileObjectConfig(v)
+ }
+ }
+ return files, fileMap
+}
+
+func (s *composeService) resolveFileContent(project *types.Project, source types.FileObjectConfig, mountType mountType) (string, error) {
+ if source.Content != "" {
+ // inlined, or already resolved by include
+ return source.Content, nil
+ }
+ if source.Environment != "" {
+ env, ok := project.Environment[source.Environment]
+ if !ok {
+ return "", fmt.Errorf("environment variable %q required by %s %q is not set", source.Environment, mountType, source.Name)
+ }
+ return env, nil
+ }
+ return "", nil
+}
+
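+// setDefaultTarget fills in the default mount path: secrets land under /run/secrets/, configs at the container root,
+// and a relative secret target is resolved against /run/secrets/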
+func (s *composeService) setDefaultTarget(file *types.FileReferenceConfig, mountType mountType) {
+ if file.Target == "" {
+ if mountType == secretMount {
+ file.Target = "/run/secrets/" + file.Source
+ } else {
+ file.Target = "/" + file.Source
+ }
+ } else if mountType == secretMount && !isAbsTarget(file.Target) {
+ file.Target = "/run/secrets/" + file.Target
+ }
+}
+
+func (s *composeService) copyFileToContainer(ctx context.Context, id, content string, file types.FileReferenceConfig) error {
+ b, err := createTar(content, file)
+ if err != nil {
+ return err
+ }
+
+ return s.apiClient().CopyToContainer(ctx, id, "/", &b, container.CopyToContainerOptions{
+ CopyUIDGID: file.UID != "" || file.GID != "",
+ })
+}
+
+func createTar(env string, config types.FileReferenceConfig) (bytes.Buffer, error) {
+ value := []byte(env)
+ b := bytes.Buffer{}
+ tarWriter := tar.NewWriter(&b)
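+	// default to a read-only 0444 mode unless the secret/config declares an explicit mode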
+ mode := types.FileMode(0o444)
+ if config.Mode != nil {
+ mode = *config.Mode
+ }
+
+ var uid, gid int
+ if config.UID != "" {
+ v, err := strconv.Atoi(config.UID)
+ if err != nil {
+ return b, err
+ }
+ uid = v
+ }
+ if config.GID != "" {
+ v, err := strconv.Atoi(config.GID)
+ if err != nil {
+ return b, err
+ }
+ gid = v
+ }
+
+ header := &tar.Header{
+ Name: config.Target,
+ Size: int64(len(value)),
+ Mode: int64(mode),
+ ModTime: time.Now(),
+ Uid: uid,
+ Gid: gid,
+ }
+ err := tarWriter.WriteHeader(header)
+ if err != nil {
+ return bytes.Buffer{}, err
+ }
+ _, err = tarWriter.Write(value)
+ if err != nil {
+ return bytes.Buffer{}, err
+ }
+ err = tarWriter.Close()
+ return b, err
+}
diff --git a/pkg/compose/shellout.go b/pkg/compose/shellout.go
new file mode 100644
index 00000000000..e7f928da2e2
--- /dev/null
+++ b/pkg/compose/shellout.go
@@ -0,0 +1,89 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli-plugins/metadata"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/cli/cli/flags"
+ "github.com/docker/compose/v5/internal"
+ "github.com/docker/docker/client"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/propagation"
+)
+
+// prepareShellOut prepares a shell-out command to be run by Compose
+func (s *composeService) prepareShellOut(gctx context.Context, env types.Mapping, cmd *exec.Cmd) error {
+ env = env.Clone()
+	// remove the DOCKER_CLI_PLUGIN... variable so a docker-cli plugin will detect it runs standalone
+ delete(env, metadata.ReexecEnvvar)
+
+ // propagate opentelemetry context to child process, see https://github.com/open-telemetry/oteps/blob/main/text/0258-env-context-baggage-carriers.md
+ carrier := propagation.MapCarrier{}
+ otel.GetTextMapPropagator().Inject(gctx, &carrier)
+ env.Merge(types.Mapping(carrier))
+
+ cmd.Env = env.Values()
+ return nil
+}
+
+// propagateDockerEndpoint produces DOCKER_* env vars for a child CLI plugin to target the same docker endpoint
+// the `cleanup` func MUST be called after the child process completes to ensure the cert files are removed
+func (s *composeService) propagateDockerEndpoint() ([]string, func(), error) {
+ cleanup := func() {}
+ env := types.Mapping{}
+
+ env[command.EnvOverrideContext] = s.dockerCli.CurrentContext()
+ env["USER_AGENT"] = "compose/" + internal.Version
+
+ endpoint := s.dockerCli.DockerEndpoint()
+ env[client.EnvOverrideHost] = endpoint.Host
+ if endpoint.TLSData != nil {
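+		// write the context's TLS material to a temporary directory the child process can read via DOCKER_CERT_PATH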
+ certs, err := os.MkdirTemp("", "compose")
+ if err != nil {
+ return nil, cleanup, err
+ }
+ cleanup = func() {
+ _ = os.RemoveAll(certs)
+ }
+ env[client.EnvOverrideCertPath] = certs
+ env["DOCKER_TLS"] = "1"
+ if !endpoint.SkipTLSVerify {
+ env[client.EnvTLSVerify] = "1"
+ }
+
+ err = os.WriteFile(filepath.Join(certs, flags.DefaultKeyFile), endpoint.TLSData.Key, 0o600)
+ if err != nil {
+ return nil, cleanup, err
+ }
+ err = os.WriteFile(filepath.Join(certs, flags.DefaultCertFile), endpoint.TLSData.Cert, 0o600)
+ if err != nil {
+ return nil, cleanup, err
+ }
+ err = os.WriteFile(filepath.Join(certs, flags.DefaultCaFile), endpoint.TLSData.CA, 0o600)
+ if err != nil {
+ return nil, cleanup, err
+ }
+ }
+ return env.Values(), cleanup, nil
+}
diff --git a/pkg/compose/start.go b/pkg/compose/start.go
index 4b1e3ed2db7..608184ff24b 100644
--- a/pkg/compose/start.go
+++ b/pkg/compose/start.go
@@ -18,20 +18,21 @@ package compose
import (
"context"
+ "errors"
+ "fmt"
+ "strings"
- "github.com/compose-spec/compose-go/types"
- moby "github.com/docker/docker/api/types"
- "github.com/pkg/errors"
- "golang.org/x/sync/errgroup"
+ "github.com/docker/compose/v5/pkg/api"
+ containerType "github.com/docker/docker/api/types/container"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/docker/api/types/filters"
)
func (s *composeService) Start(ctx context.Context, projectName string, options api.StartOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.start(ctx, projectName, options, nil)
- })
+ return Run(ctx, func(ctx context.Context) error {
+ return s.start(ctx, strings.ToLower(projectName), options, nil)
+ }, "start", s.events)
}
func (s *composeService) start(ctx context.Context, projectName string, options api.StartOptions, listener api.ContainerEventListener) error {
@@ -49,27 +50,25 @@ func (s *composeService) start(ctx context.Context, projectName string, options
}
}
- eg, ctx := errgroup.WithContext(ctx)
- if listener != nil {
- attached, err := s.attach(ctx, project, listener, options.AttachTo)
- if err != nil {
- return err
- }
-
- eg.Go(func() error {
- return s.watchContainers(context.Background(), project.Name, options.AttachTo, listener, attached, func(container moby.Container) error {
- return s.attachContainer(ctx, container, listener)
- })
- })
+ var containers Containers
+ containers, err := s.apiClient().ContainerList(ctx, containerType.ListOptions{
+ Filters: filters.NewArgs(
+ projectFilter(project.Name),
+ oneOffFilter(false),
+ ),
+ All: true,
+ })
+ if err != nil {
+ return err
}
- err := InDependencyOrder(ctx, project, func(c context.Context, name string) error {
+ err = InDependencyOrder(ctx, project, func(c context.Context, name string) error {
service, err := project.GetService(name)
if err != nil {
return err
}
- return s.startService(ctx, project, service)
+ return s.startService(ctx, project, service, containers, listener, options.WaitTimeout)
})
if err != nil {
return err
@@ -79,122 +78,38 @@ func (s *composeService) start(ctx context.Context, projectName string, options
depends := types.DependsOnConfig{}
for _, s := range project.Services {
depends[s.Name] = types.ServiceDependency{
- Condition: ServiceConditionRunningOrHealthy,
+ Condition: getDependencyCondition(s, project),
+ Required: true,
}
}
- err = s.waitDependencies(ctx, project, depends)
+ if options.WaitTimeout > 0 {
+ withTimeout, cancel := context.WithTimeout(ctx, options.WaitTimeout)
+ ctx = withTimeout
+ defer cancel()
+ }
+
+ err = s.waitDependencies(ctx, project, project.Name, depends, containers, 0)
if err != nil {
+ if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+ return fmt.Errorf("application not healthy after %s", options.WaitTimeout)
+ }
return err
}
}
- return eg.Wait()
+ return nil
}
-type containerWatchFn func(container moby.Container) error
-
-// watchContainers uses engine events to capture container start/die and notify ContainerEventListener
-func (s *composeService) watchContainers(ctx context.Context, projectName string, services []string, listener api.ContainerEventListener, containers Containers, onStart containerWatchFn) error {
- watched := map[string]int{}
- for _, c := range containers {
- watched[c.ID] = 0
- }
-
- ctx, stop := context.WithCancel(ctx)
- err := s.Events(ctx, projectName, api.EventsOptions{
- Services: services,
- Consumer: func(event api.Event) error {
- if event.Status == "destroy" {
- // This container can't be inspected, because it's gone.
- // It's already been removed from the watched map.
- return nil
+// getDependencyCondition checks if a service is depended on by other services
+// with the service_completed_successfully condition, and applies that condition
+// instead; otherwise --wait would never finish waiting for one-shot containers
+func getDependencyCondition(service types.ServiceConfig, project *types.Project) string {
+ for _, services := range project.Services {
+ for dependencyService, dependencyConfig := range services.DependsOn {
+ if dependencyService == service.Name && dependencyConfig.Condition == types.ServiceConditionCompletedSuccessfully {
+ return types.ServiceConditionCompletedSuccessfully
}
-
- inspected, err := s.apiClient().ContainerInspect(ctx, event.Container)
- if err != nil {
- return err
- }
- container := moby.Container{
- ID: inspected.ID,
- Names: []string{inspected.Name},
- Labels: inspected.Config.Labels,
- }
- name := getContainerNameWithoutProject(container)
-
- if event.Status == "stop" {
- listener(api.ContainerEvent{
- Type: api.ContainerEventStopped,
- Container: name,
- Service: container.Labels[api.ServiceLabel],
- })
-
- delete(watched, container.ID)
- if len(watched) == 0 {
- // all project containers stopped, we're done
- stop()
- }
- return nil
- }
-
- if event.Status == "die" {
- restarted := watched[container.ID]
- watched[container.ID] = restarted + 1
- // Container terminated.
- willRestart := willContainerRestart(inspected, restarted)
-
- listener(api.ContainerEvent{
- Type: api.ContainerEventExit,
- Container: name,
- Service: container.Labels[api.ServiceLabel],
- ExitCode: inspected.State.ExitCode,
- Restarting: willRestart,
- })
-
- if !willRestart {
- // we're done with this one
- delete(watched, container.ID)
- }
-
- if len(watched) == 0 {
- // all project containers stopped, we're done
- stop()
- }
- return nil
- }
-
- if event.Status == "start" {
- count, ok := watched[container.ID]
- mustAttach := ok && count > 0 // Container restarted, need to re-attach
- if !ok {
- // A new container has just been added to service by scale
- watched[container.ID] = 0
- mustAttach = true
- }
- if mustAttach {
- // Container restarted, need to re-attach
- err := onStart(container)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
- },
- })
- if errors.Is(ctx.Err(), context.Canceled) {
- return nil
- }
- return err
-}
-
-func willContainerRestart(container moby.ContainerJSON, restarted int) bool {
- policy := container.HostConfig.RestartPolicy
- if policy.IsAlways() || policy.IsUnlessStopped() {
- return true
- }
- if policy.IsOnFailure() {
- return container.State.ExitCode != 0 && policy.MaximumRetryCount > restarted
+ }
}
- return false
+ return ServiceConditionRunningOrHealthy
}
diff --git a/pkg/compose/stop.go b/pkg/compose/stop.go
index 686b852d277..79272513c0b 100644
--- a/pkg/compose/stop.go
+++ b/pkg/compose/stop.go
@@ -18,26 +18,41 @@ package compose
import (
"context"
+ "slices"
+ "strings"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
+ "github.com/docker/compose/v5/pkg/api"
)
func (s *composeService) Stop(ctx context.Context, projectName string, options api.StopOptions) error {
- return progress.Run(ctx, func(ctx context.Context) error {
- return s.stop(ctx, projectName, options)
- })
+ return Run(ctx, func(ctx context.Context) error {
+ return s.stop(ctx, strings.ToLower(projectName), options, nil)
+ }, "stop", s.events)
}
-func (s *composeService) stop(ctx context.Context, projectName string, options api.StopOptions) error {
- w := progress.ContextWriter(ctx)
-
- containers, project, err := s.actualState(ctx, projectName, options.Services)
+func (s *composeService) stop(ctx context.Context, projectName string, options api.StopOptions, event api.ContainerEventListener) error {
+ containers, err := s.getContainers(ctx, projectName, oneOffExclude, true)
if err != nil {
return err
}
+ project := options.Project
+ if project == nil {
+ project, err = s.getProjectWithResources(ctx, containers, projectName)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(options.Services) == 0 {
+ options.Services = project.ServiceNames()
+ }
+
return InReverseDependencyOrder(ctx, project, func(c context.Context, service string) error {
- return s.stopContainers(ctx, w, containers.filter(isService(service)), options.Timeout)
+ if !slices.Contains(options.Services, service) {
+ return nil
+ }
+ serv := project.Services[service]
+ return s.stopContainers(ctx, &serv, containers.filter(isService(service)).filter(isNotOneOff), options.Timeout, event)
})
}
diff --git a/pkg/compose/stop_test.go b/pkg/compose/stop_test.go
index e5848780e93..2508be06bff 100644
--- a/pkg/compose/stop_test.go
+++ b/pkg/compose/stop_test.go
@@ -22,37 +22,48 @@ import (
"testing"
"time"
- compose "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/mocks"
-
- moby "github.com/docker/docker/api/types"
- "github.com/golang/mock/gomock"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/volume"
+ "go.uber.org/mock/gomock"
"gotest.tools/v3/assert"
+
+ compose "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/utils"
)
func TestStopTimeout(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
- api := mocks.NewMockAPIClient(mockCtrl)
- cli := mocks.NewMockCli(mockCtrl)
- tested.dockerCli = cli
- cli.EXPECT().Client().Return(api).AnyTimes()
+ api, cli := prepareMocks(mockCtrl)
+ tested, err := NewComposeService(cli)
+ assert.NilError(t, err)
ctx := context.Background()
- api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt()).Return(
- []moby.Container{
+ api.EXPECT().ContainerList(gomock.Any(), projectFilterListOpt(false)).Return(
+ []container.Summary{
testContainer("service1", "123", false),
testContainer("service1", "456", false),
testContainer("service2", "789", false),
}, nil)
+ api.EXPECT().VolumeList(
+ gomock.Any(),
+ volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject))),
+ }).
+ Return(volume.ListResponse{}, nil)
+ api.EXPECT().NetworkList(gomock.Any(), network.ListOptions{Filters: filters.NewArgs(projectFilter(strings.ToLower(testProject)))}).
+ Return([]network.Summary{}, nil)
- timeout := time.Duration(2) * time.Second
- api.EXPECT().ContainerStop(gomock.Any(), "123", &timeout).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "456", &timeout).Return(nil)
- api.EXPECT().ContainerStop(gomock.Any(), "789", &timeout).Return(nil)
+ timeout := 2 * time.Second
+ stopConfig := container.StopOptions{Timeout: utils.DurationSecondToInt(&timeout)}
+ api.EXPECT().ContainerStop(gomock.Any(), "123", stopConfig).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "456", stopConfig).Return(nil)
+ api.EXPECT().ContainerStop(gomock.Any(), "789", stopConfig).Return(nil)
- err := tested.Stop(ctx, strings.ToLower(testProject), compose.StopOptions{
+ err = tested.Stop(ctx, strings.ToLower(testProject), compose.StopOptions{
Timeout: &timeout,
})
assert.NilError(t, err)
diff --git a/pkg/compose/suffix_unix.go b/pkg/compose/suffix_unix.go
new file mode 100644
index 00000000000..59595384ca4
--- /dev/null
+++ b/pkg/compose/suffix_unix.go
@@ -0,0 +1,23 @@
+//go:build !windows
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+func executable(s string) string {
+ return s
+}
diff --git a/pkg/compose/testdata/publish/common.yaml b/pkg/compose/testdata/publish/common.yaml
new file mode 100644
index 00000000000..ce048e36623
--- /dev/null
+++ b/pkg/compose/testdata/publish/common.yaml
@@ -0,0 +1,3 @@
+services:
+ foo:
+ image: bar
diff --git a/pkg/compose/testdata/publish/compose.yaml b/pkg/compose/testdata/publish/compose.yaml
new file mode 100644
index 00000000000..9c9f3659b5e
--- /dev/null
+++ b/pkg/compose/testdata/publish/compose.yaml
@@ -0,0 +1,20 @@
+name: test
+services:
+ test:
+ extends:
+ file: common.yaml
+ service: foo
+
+ string:
+ image: test
+ env_file: test.env
+
+ list:
+ image: test
+ env_file:
+ - test.env
+
+ mapping:
+ image: test
+ env_file:
+ - path: test.env
diff --git a/pkg/compose/testdata/publish/test.env b/pkg/compose/testdata/publish/test.env
new file mode 100644
index 00000000000..6e1f61b59ea
--- /dev/null
+++ b/pkg/compose/testdata/publish/test.env
@@ -0,0 +1 @@
+HELLO=WORLD
\ No newline at end of file
diff --git a/pkg/compose/top.go b/pkg/compose/top.go
index a65b72874f8..5a766cbe08b 100644
--- a/pkg/compose/top.go
+++ b/pkg/compose/top.go
@@ -18,12 +18,14 @@ package compose
import (
"context"
+ "strings"
- "github.com/docker/compose/v2/pkg/api"
+ "github.com/docker/compose/v5/pkg/api"
"golang.org/x/sync/errgroup"
)
func (s *composeService) Top(ctx context.Context, projectName string, services []string) ([]api.ContainerProcSummary, error) {
+ projectName = strings.ToLower(projectName)
var containers Containers
containers, err := s.getContainers(ctx, projectName, oneOffInclude, false)
if err != nil {
@@ -34,19 +36,27 @@ func (s *composeService) Top(ctx context.Context, projectName string, services [
}
summary := make([]api.ContainerProcSummary, len(containers))
eg, ctx := errgroup.WithContext(ctx)
- for i, container := range containers {
- i, container := i, container
+ for i, ctr := range containers {
eg.Go(func() error {
- topContent, err := s.apiClient().ContainerTop(ctx, container.ID, []string{})
+ topContent, err := s.apiClient().ContainerTop(ctx, ctr.ID, []string{})
if err != nil {
return err
}
- summary[i] = api.ContainerProcSummary{
- ID: container.ID,
- Name: getCanonicalContainerName(container),
+ name := getCanonicalContainerName(ctr)
+ s := api.ContainerProcSummary{
+ ID: ctr.ID,
+ Name: name,
Processes: topContent.Processes,
Titles: topContent.Titles,
+ Service: name,
}
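+			// override the defaults with the compose service and container-number labels when present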
+ if service, exists := ctr.Labels[api.ServiceLabel]; exists {
+ s.Service = service
+ }
+ if replica, exists := ctr.Labels[api.ContainerNumberLabel]; exists {
+ s.Replica = replica
+ }
+ summary[i] = s
return nil
})
}
diff --git a/pkg/compose/transform/replace.go b/pkg/compose/transform/replace.go
new file mode 100644
index 00000000000..8fdaf60b9c4
--- /dev/null
+++ b/pkg/compose/transform/replace.go
@@ -0,0 +1,149 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package transform
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v3"
+)
+
+// ReplaceExtendsFile changes the value of service.extends.file in the input yaml stream, preserving formatting
+func ReplaceExtendsFile(in []byte, service string, value string) ([]byte, error) {
+ var doc yaml.Node
+ err := yaml.Unmarshal(in, &doc)
+ if err != nil {
+ return nil, err
+ }
+ if doc.Kind != yaml.DocumentNode {
+ return nil, fmt.Errorf("expected document kind %v, got %v", yaml.DocumentNode, doc.Kind)
+ }
+ root := doc.Content[0]
+ if root.Kind != yaml.MappingNode {
+ return nil, fmt.Errorf("expected document root to be a mapping, got %v", root.Kind)
+ }
+
+ services, err := getMapping(root, "services")
+ if err != nil {
+ return nil, err
+ }
+
+ target, err := getMapping(services, service)
+ if err != nil {
+ return nil, err
+ }
+
+ extends, err := getMapping(target, "extends")
+ if err != nil {
+ return nil, err
+ }
+
+ file, err := getMapping(extends, "file")
+ if err != nil {
+ return nil, err
+ }
+
+ // we've found target `file` yaml node. Let's replace value in stream at node position
+ return replace(in, file.Line, file.Column, value), nil
+}
+
+// ReplaceEnvFile changes the value of service.env_file in the input yaml stream, preserving formatting
+func ReplaceEnvFile(in []byte, service string, i int, value string) ([]byte, error) {
+ var doc yaml.Node
+ err := yaml.Unmarshal(in, &doc)
+ if err != nil {
+ return nil, err
+ }
+ if doc.Kind != yaml.DocumentNode {
+ return nil, fmt.Errorf("expected document kind %v, got %v", yaml.DocumentNode, doc.Kind)
+ }
+ root := doc.Content[0]
+ if root.Kind != yaml.MappingNode {
+ return nil, fmt.Errorf("expected document root to be a mapping, got %v", root.Kind)
+ }
+
+ services, err := getMapping(root, "services")
+ if err != nil {
+ return nil, err
+ }
+
+ target, err := getMapping(services, service)
+ if err != nil {
+ return nil, err
+ }
+
+ envFile, err := getMapping(target, "env_file")
+ if err != nil {
+ return nil, err
+ }
+
+ // env_file can be either a string, sequence of strings, or sequence of mappings with path attribute
+ if envFile.Kind == yaml.SequenceNode {
+ envFile = envFile.Content[i]
+ if envFile.Kind == yaml.MappingNode {
+ envFile, err = getMapping(envFile, "path")
+ if err != nil {
+ return nil, err
+ }
+ }
+ return replace(in, envFile.Line, envFile.Column, value), nil
+ } else {
+ return replace(in, envFile.Line, envFile.Column, value), nil
+ }
+}
+
+func getMapping(root *yaml.Node, key string) (*yaml.Node, error) {
+ var node *yaml.Node
+ l := len(root.Content)
+ for i := 0; i < l; i += 2 {
+ k := root.Content[i]
+ if k.Kind != yaml.ScalarNode || k.Tag != "!!str" {
+ return nil, fmt.Errorf("expected mapping key to be a string, got %v %v", root.Kind, k.Tag)
+ }
+ if k.Value == key {
+ node = root.Content[i+1]
+ return node, nil
+ }
+ }
+ return nil, fmt.Errorf("key %v not found", key)
+}
+
+// replace changes yaml node value in stream at position, preserving content
+func replace(in []byte, line int, column int, value string) []byte {
+ var out []byte
+ l := 1
+ pos := 0
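+	// walk the input counting newlines to locate the line holding the node; the column offset is added below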
+ for _, b := range in {
+ if b == '\n' {
+ l++
+ if l == line {
+ break
+ }
+ }
+ pos++
+ }
+ pos += column
+ out = append(out, in[0:pos]...)
+ out = append(out, []byte(value)...)
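+	// skip past the original value to the end of its line so it is not duplicated in the output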
+ for ; pos < len(in); pos++ {
+ if in[pos] == '\n' {
+ break
+ }
+ }
+ out = append(out, in[pos:]...)
+ return out
+}
diff --git a/pkg/compose/transform/replace_test.go b/pkg/compose/transform/replace_test.go
new file mode 100644
index 00000000000..6f7477e0350
--- /dev/null
+++ b/pkg/compose/transform/replace_test.go
@@ -0,0 +1,85 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package transform
+
+import (
+ "reflect"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestReplace(t *testing.T) {
+ tests := []struct {
+ name string
+ in string
+ want string
+ }{
+ {
+ name: "simple",
+ in: `services:
+ test:
+ extends:
+ file: foo.yaml
+ service: foo
+`,
+ want: `services:
+ test:
+ extends:
+ file: REPLACED
+ service: foo
+`,
+ },
+ {
+ name: "last line",
+ in: `services:
+ test:
+ extends:
+ service: foo
+ file: foo.yaml
+`,
+ want: `services:
+ test:
+ extends:
+ service: foo
+ file: REPLACED
+`,
+ },
+ {
+ name: "last line no CR",
+ in: `services:
+ test:
+ extends:
+ service: foo
+ file: foo.yaml`,
+ want: `services:
+ test:
+ extends:
+ service: foo
+ file: REPLACED`,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := ReplaceExtendsFile([]byte(tt.in), "test", "REPLACED")
+ assert.NilError(t, err)
+ if !reflect.DeepEqual(got, []byte(tt.want)) {
+ t.Errorf("ReplaceExtendsFile() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/compose/up.go b/pkg/compose/up.go
index d8e8d050f42..ad1630fc3a4 100644
--- a/pkg/compose/up.go
+++ b/pkg/compose/up.go
@@ -18,21 +18,28 @@ package compose
import (
"context"
+ "errors"
"fmt"
"os"
"os/signal"
+ "slices"
+ "sync"
+ "sync/atomic"
"syscall"
- "github.com/docker/compose/v2/pkg/api"
- "github.com/docker/compose/v2/pkg/progress"
-
- "github.com/compose-spec/compose-go/types"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/containerd/errdefs"
"github.com/docker/cli/cli"
+ "github.com/docker/compose/v5/cmd/formatter"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/eiannone/keyboard"
+ "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
-func (s *composeService) Up(ctx context.Context, project *types.Project, options api.UpOptions) error {
- err := progress.Run(ctx, func(ctx context.Context) error {
+func (s *composeService) Up(ctx context.Context, project *types.Project, options api.UpOptions) error { //nolint:gocyclo
+ err := Run(ctx, tracing.SpanWrapFunc("project/up", tracing.ProjectOptions(ctx, project), func(ctx context.Context) error {
err := s.create(ctx, project, options.Create)
if err != nil {
return err
@@ -41,7 +48,7 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
return s.start(ctx, project.Name, options.Start, nil)
}
return nil
- })
+ }), "up", s.events)
if err != nil {
return err
}
@@ -49,48 +56,241 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
if options.Start.Attach == nil {
return err
}
+ if s.dryRun {
+ _, _ = fmt.Fprintln(s.stdout(), "end of 'compose up' output, interactive run is not supported in dry-run mode")
+ return err
+ }
- printer := newLogPrinter(options.Start.Attach)
-
- signalChan := make(chan os.Signal, 1)
+ // if we get a second signal during shutdown, we kill the services
+ // immediately, so the channel needs to have sufficient capacity or
+ // we might miss a signal while setting up the second channel read
+ // (this is also why signal.Notify is used vs signal.NotifyContext)
+ signalChan := make(chan os.Signal, 2)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
+ defer signal.Stop(signalChan)
+ var isTerminated atomic.Bool
+
+ var (
+ logConsumer = options.Start.Attach
+ navigationMenu *formatter.LogKeyboard
+ kEvents <-chan keyboard.KeyEvent
+ )
+ if options.Start.NavigationMenu {
+ kEvents, err = keyboard.GetKeys(100)
+ if err != nil {
+ logrus.Warnf("could not start menu, an error occurred while starting: %v", err)
+ options.Start.NavigationMenu = false
+ } else {
+ defer keyboard.Close() //nolint:errcheck
+ isDockerDesktopActive, err := s.isDesktopIntegrationActive(ctx)
+ if err != nil {
+ return err
+ }
+ tracing.KeyboardMetrics(ctx, options.Start.NavigationMenu, isDockerDesktopActive)
+ navigationMenu = formatter.NewKeyboardManager(isDockerDesktopActive, signalChan)
+ logConsumer = navigationMenu.Decorate(logConsumer)
+ }
+ }
+
+ watcher, err := NewWatcher(project, options, s.watch, logConsumer)
+ if err != nil && options.Start.Watch {
+ return err
+ }
+
+ if navigationMenu != nil && watcher != nil {
+ navigationMenu.EnableWatch(options.Start.Watch, watcher)
+ }
+
+ printer := newLogPrinter(logConsumer)
- stopFunc := func() error {
- ctx := context.Background()
- return progress.Run(ctx, func(ctx context.Context) error {
- go func() {
- <-signalChan
- s.Kill(ctx, project.Name, api.KillOptions{ // nolint:errcheck
+ // global context to handle canceling goroutines
+ globalCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ if navigationMenu != nil {
+ navigationMenu.EnableDetach(cancel)
+ }
+
+ var (
+ eg errgroup.Group
+ mu sync.Mutex
+ errs []error
+ )
+
+ appendErr := func(err error) {
+ if err != nil {
+ mu.Lock()
+ errs = append(errs, err)
+ mu.Unlock()
+ }
+ }
+
+ eg.Go(func() error {
+ first := true
+ gracefulTeardown := func() {
+ first = false
+ s.events.On(newEvent(api.ResourceCompose, api.Working, api.StatusStopping, "Gracefully Stopping... press Ctrl+C again to force"))
+ eg.Go(func() error {
+ err = s.stop(context.WithoutCancel(globalCtx), project.Name, api.StopOptions{
Services: options.Create.Services,
+ Project: project,
+ }, printer.HandleEvent)
+ appendErr(err)
+ return nil
+ })
+ isTerminated.Store(true)
+ }
+
+ for {
+ select {
+ case <-globalCtx.Done():
+ if watcher != nil {
+ return watcher.Stop()
+ }
+ return nil
+ case <-ctx.Done():
+ if first {
+ gracefulTeardown()
+ }
+ case <-signalChan:
+ if first {
+ _ = keyboard.Close()
+ gracefulTeardown()
+ break
+ }
+ eg.Go(func() error {
+ err := s.kill(context.WithoutCancel(globalCtx), project.Name, api.KillOptions{
+ Services: options.Create.Services,
+ Project: project,
+ All: true,
+ })
+ // Ignore errors indicating that some of the containers were already stopped or removed.
+ if errdefs.IsNotFound(err) || errdefs.IsConflict(err) || errors.Is(err, api.ErrNoResources) {
+ return nil
+ }
+
+ appendErr(err)
+ return nil
+ })
+ return nil
+ case event := <-kEvents:
+ navigationMenu.HandleKeyEvents(globalCtx, event, project, options)
+ }
+ }
+ })
+
+ if options.Start.Watch && watcher != nil {
+ if err := watcher.Start(globalCtx); err != nil {
+ // cancel the global context to terminate background goroutines
+ cancel()
+ _ = eg.Wait()
+ return err
+ }
+ }
+
+ monitor := newMonitor(s.apiClient(), project.Name)
+ if len(options.Start.Services) > 0 {
+ monitor.withServices(options.Start.Services)
+ } else {
+ // Start.AttachTo has already been curated to include only the services to monitor
+ monitor.withServices(options.Start.AttachTo)
+ }
+ monitor.withListener(printer.HandleEvent)
+
+ var exitCode int
+ if options.Start.OnExit != api.CascadeIgnore {
+ once := true
+ // detect first container to exit to trigger application shutdown
+ monitor.withListener(func(event api.ContainerEvent) {
+ if once && event.Type == api.ContainerEventExited {
+ if options.Start.OnExit == api.CascadeFail && event.ExitCode == 0 {
+ return
+ }
+ once = false
+ exitCode = event.ExitCode
+ s.events.On(newEvent(api.ResourceCompose, api.Working, api.StatusStopping, "Aborting on container exit..."))
+ eg.Go(func() error {
+ err = s.stop(context.WithoutCancel(globalCtx), project.Name, api.StopOptions{
+ Services: options.Create.Services,
+ Project: project,
+ }, printer.HandleEvent)
+ appendErr(err)
+ return nil
})
- }()
+ }
+ })
+ }
- return s.Stop(ctx, project.Name, api.StopOptions{
- Services: options.Create.Services,
- })
+ if options.Start.ExitCodeFrom != "" {
+ once := true
+ // capture the exit code of the first exiting container of the selected service
+ monitor.withListener(func(event api.ContainerEvent) {
+ if once && event.Type == api.ContainerEventExited && event.Service == options.Start.ExitCodeFrom {
+ exitCode = event.ExitCode
+ once = false
+ }
})
}
- go func() {
- <-signalChan
- printer.Cancel()
- fmt.Println("Gracefully stopping... (press Ctrl+C again to force)")
- stopFunc() // nolint:errcheck
- }()
- var exitCode int
- eg, ctx := errgroup.WithContext(ctx)
- eg.Go(func() error {
- code, err := printer.Run(context.Background(), options.Start.CascadeStop, options.Start.ExitCodeFrom, stopFunc)
- exitCode = code
+ containers, err := s.attach(globalCtx, project, printer.HandleEvent, options.Start.AttachTo)
+ if err != nil {
+ cancel()
+ _ = eg.Wait()
return err
+ }
+ attached := make([]string, len(containers))
+ for i, ctr := range containers {
+ attached[i] = ctr.ID
+ }
+
+ monitor.withListener(func(event api.ContainerEvent) {
+ if event.Type != api.ContainerEventStarted {
+ return
+ }
+ if slices.Contains(attached, event.ID) && !event.Restarting {
+ return
+ }
+ eg.Go(func() error {
+ ctr, err := s.apiClient().ContainerInspect(globalCtx, event.ID)
+ if err != nil {
+ appendErr(err)
+ return nil
+ }
+
+ err = s.doLogContainer(globalCtx, options.Start.Attach, event.Source, ctr, api.LogOptions{
+ Follow: true,
+ Since: ctr.State.StartedAt,
+ })
+ if errdefs.IsNotImplemented(err) {
+ // the container may be configured with logging driver "none"
+ // since the container has already started, we might miss the very first logs, but that's still better than nothing
+ err := s.doAttachContainer(globalCtx, event.Service, event.ID, event.Source, printer.HandleEvent)
+ appendErr(err)
+ return nil
+ }
+ appendErr(err)
+ return nil
+ })
})
- err = s.start(ctx, project.Name, options.Start, printer.HandleEvent)
- if err != nil {
+ eg.Go(func() error {
+ err := monitor.Start(globalCtx)
+ // cancel the global context to terminate signal-handler goroutines
+ cancel()
+ appendErr(err)
+ return nil
+ })
+
+ // We use the parent context without cancellation because we handle SIGTERM ourselves to stop the stack
+ err = s.start(context.WithoutCancel(ctx), project.Name, options.Start, printer.HandleEvent)
+ if err != nil && !isTerminated.Load() { // Ignore error if the process is terminated
+ cancel()
+ _ = eg.Wait()
return err
}
- err = eg.Wait()
+ _ = eg.Wait()
+ err = errors.Join(errs...)
if exitCode != 0 {
errMsg := ""
if err != nil {
diff --git a/pkg/compose/viz.go b/pkg/compose/viz.go
new file mode 100644
index 00000000000..18c1af7793b
--- /dev/null
+++ b/pkg/compose/viz.go
@@ -0,0 +1,140 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+// maps a service with the services it depends on
+type vizGraph map[*types.ServiceConfig][]*types.ServiceConfig
+
+func (s *composeService) Viz(_ context.Context, project *types.Project, opts api.VizOptions) (string, error) {
+ graph := make(vizGraph)
+ for _, service := range project.Services {
+ graph[&service] = make([]*types.ServiceConfig, 0, len(service.DependsOn))
+ for dependencyName := range service.DependsOn {
+ // no error should be returned since dependencyName should exist
+ dependency, _ := project.GetService(dependencyName)
+ graph[&service] = append(graph[&service], &dependency)
+ }
+ }
+
+ // build graphviz graph
+ var graphBuilder strings.Builder
+
+ // graph name
+ graphBuilder.WriteString("digraph ")
+ writeQuoted(&graphBuilder, project.Name)
+ graphBuilder.WriteString(" {\n")
+
+ // graph layout
+ // dot is well suited to this use case since the graph is directed and hierarchical
+ graphBuilder.WriteString(opts.Indentation + "layout=dot;\n")
+
+ addNodes(&graphBuilder, graph, project.Name, &opts)
+ graphBuilder.WriteByte('\n')
+
+ addEdges(&graphBuilder, graph, &opts)
+ graphBuilder.WriteString("}\n")
+
+ return graphBuilder.String(), nil
+}
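+
+// As an illustration (a sketch, not verbatim output; node labels depend on the
+// enabled options), a project "demo" where "web" depends on "db" renders roughly as:
+//
+//   digraph "demo" {
+//     layout=dot;
+//     "web" [style="filled" label=<...>];
+//     "db" [style="filled" label=<...>];
+//
+//     "web" -> "db";
+//   }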
+
+// addNodes adds the corresponding graphviz representation of all the nodes in the given graph to the graphBuilder
+// returns the same graphBuilder
+func addNodes(graphBuilder *strings.Builder, graph vizGraph, projectName string, opts *api.VizOptions) *strings.Builder {
+ for serviceNode := range graph {
+ // write:
+ // "service name" [style="filled" label<service name
+ graphBuilder.WriteString(opts.Indentation)
+ writeQuoted(graphBuilder, serviceNode.Name)
+ graphBuilder.WriteString(" [style=\"filled\" label=<")
+ graphBuilder.WriteString(serviceNode.Name)
+ graphBuilder.WriteString("")
+
+ if opts.IncludeNetworks && len(serviceNode.Networks) > 0 {
+ graphBuilder.WriteString("")
+ graphBuilder.WriteString("
Networks:")
+ for _, networkName := range serviceNode.NetworksByPriority() {
+ graphBuilder.WriteString("
")
+ graphBuilder.WriteString(networkName)
+ }
+ graphBuilder.WriteString("")
+ }
+
+ if opts.IncludePorts && len(serviceNode.Ports) > 0 {
+ graphBuilder.WriteString("")
+ graphBuilder.WriteString("
Ports:")
+ for _, portConfig := range serviceNode.Ports {
+ graphBuilder.WriteString("
")
+ if portConfig.HostIP != "" {
+ graphBuilder.WriteString(portConfig.HostIP)
+ graphBuilder.WriteByte(':')
+ }
+ graphBuilder.WriteString(portConfig.Published)
+ graphBuilder.WriteByte(':')
+ graphBuilder.WriteString(strconv.Itoa(int(portConfig.Target)))
+ graphBuilder.WriteString(" (")
+ graphBuilder.WriteString(portConfig.Protocol)
+ graphBuilder.WriteString(", ")
+ graphBuilder.WriteString(portConfig.Mode)
+ graphBuilder.WriteString(")")
+ }
+ graphBuilder.WriteString("")
+ }
+
+ if opts.IncludeImageName {
+ graphBuilder.WriteString("")
+ graphBuilder.WriteString("
Image:
")
+ graphBuilder.WriteString(api.GetImageNameOrDefault(*serviceNode, projectName))
+ graphBuilder.WriteString("")
+ }
+
+ graphBuilder.WriteString(">];\n")
+ }
+
+ return graphBuilder
+}
+
+// addEdges adds the corresponding graphviz representation of all edges in the given graph to the graphBuilder
+// returns the same graphBuilder
+func addEdges(graphBuilder *strings.Builder, graph vizGraph, opts *api.VizOptions) *strings.Builder {
+ for parent, children := range graph {
+ for _, child := range children {
+ graphBuilder.WriteString(opts.Indentation)
+ writeQuoted(graphBuilder, parent.Name)
+ graphBuilder.WriteString(" -> ")
+ writeQuoted(graphBuilder, child.Name)
+ graphBuilder.WriteString(";\n")
+ }
+ }
+
+ return graphBuilder
+}
+
+// writeQuoted writes str to builder, wrapped in double quotes
+func writeQuoted(builder *strings.Builder, str string) {
+ builder.WriteByte('"')
+ builder.WriteString(str)
+ builder.WriteByte('"')
+}
diff --git a/pkg/compose/viz_test.go b/pkg/compose/viz_test.go
new file mode 100644
index 00000000000..ae66de34f41
--- /dev/null
+++ b/pkg/compose/viz_test.go
@@ -0,0 +1,219 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "strconv"
+ "testing"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+
+ compose "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/mocks"
+)
+
+func TestViz(t *testing.T) {
+ project := types.Project{
+ Name: "viz-test",
+ WorkingDir: "/home",
+ Services: types.Services{
+ "service1": {
+ Name: "service1",
+ Image: "image-for-service1",
+ Ports: []types.ServicePortConfig{
+ {
+ Published: "80",
+ Target: 80,
+ Protocol: "tcp",
+ },
+ {
+ Published: "53",
+ Target: 533,
+ Protocol: "udp",
+ },
+ },
+ Networks: map[string]*types.ServiceNetworkConfig{
+ "internal": nil,
+ },
+ },
+ "service2": {
+ Name: "service2",
+ Image: "image-for-service2",
+ Ports: []types.ServicePortConfig{},
+ },
+ "service3": {
+ Name: "service3",
+ Image: "some-image",
+ DependsOn: map[string]types.ServiceDependency{
+ "service2": {},
+ "service1": {},
+ },
+ },
+ "service4": {
+ Name: "service4",
+ Image: "another-image",
+ DependsOn: map[string]types.ServiceDependency{
+ "service3": {},
+ },
+ Ports: []types.ServicePortConfig{
+ {
+ Published: "8080",
+ Target: 80,
+ },
+ },
+ Networks: map[string]*types.ServiceNetworkConfig{
+ "external": nil,
+ },
+ },
+ "With host IP": {
+ Name: "With host IP",
+ Image: "user/image-name",
+ DependsOn: map[string]types.ServiceDependency{
+ "service1": {},
+ },
+ Ports: []types.ServicePortConfig{
+ {
+ Published: "8888",
+ Target: 8080,
+ HostIP: "127.0.0.1",
+ },
+ },
+ },
+ },
+ Networks: types.Networks{
+ "internal": types.NetworkConfig{},
+ "external": types.NetworkConfig{},
+ "not-used": types.NetworkConfig{},
+ },
+ Volumes: nil,
+ Secrets: nil,
+ Configs: nil,
+ Extensions: nil,
+ ComposeFiles: nil,
+ Environment: nil,
+ DisabledServices: nil,
+ Profiles: nil,
+ }
+
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ cli := mocks.NewMockCli(mockCtrl)
+ tested, err := NewComposeService(cli)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ t.Run("viz (no ports, networks or image)", func(t *testing.T) {
+ graphStr, err := tested.Viz(ctx, &project, compose.VizOptions{
+ Indentation: " ",
+ IncludePorts: false,
+ IncludeImageName: false,
+ IncludeNetworks: false,
+ })
+ require.NoError(t, err, "viz command failed")
+
+ // check indentation
+ assert.Contains(t, graphStr, "\n ", graphStr)
+ assert.NotContains(t, graphStr, "\n ", graphStr)
+
+ // check digraph name
+ assert.Contains(t, graphStr, "digraph \""+project.Name+"\"", graphStr)
+
+ // check nodes
+ for _, service := range project.Services {
+ assert.Contains(t, graphStr, "\""+service.Name+"\" [style=\"filled\"", graphStr)
+ }
+
+ // check node attributes
+ assert.NotContains(t, graphStr, "Networks", graphStr)
+ assert.NotContains(t, graphStr, "Image", graphStr)
+ assert.NotContains(t, graphStr, "Ports", graphStr)
+
+ // check edges that SHOULD exist in the generated graph
+ allowedEdges := make(map[string][]string)
+ for name, service := range project.Services {
+ allowed := make([]string, 0, len(service.DependsOn))
+ for depName := range service.DependsOn {
+ allowed = append(allowed, depName)
+ }
+ allowedEdges[name] = allowed
+ }
+ for serviceName, dependencies := range allowedEdges {
+ for _, dependencyName := range dependencies {
+ assert.Contains(t, graphStr, "\""+serviceName+"\" -> \""+dependencyName+"\"", graphStr)
+ }
+ }
+
+ // check edges that SHOULD NOT exist in the generated graph
+ forbiddenEdges := make(map[string][]string)
+ for name, service := range project.Services {
+ forbiddenEdges[name] = make([]string, 0, len(project.ServiceNames())-len(service.DependsOn))
+ for _, serviceName := range project.ServiceNames() {
+ _, edgeExists := service.DependsOn[serviceName]
+ if !edgeExists {
+ forbiddenEdges[name] = append(forbiddenEdges[name], serviceName)
+ }
+ }
+ }
+ for serviceName, forbiddenDeps := range forbiddenEdges {
+ for _, forbiddenDep := range forbiddenDeps {
+ assert.NotContains(t, graphStr, "\""+serviceName+"\" -> \""+forbiddenDep+"\"")
+ }
+ }
+ })
+
+ t.Run("viz (with ports, networks and image)", func(t *testing.T) {
+ graphStr, err := tested.Viz(ctx, &project, compose.VizOptions{
+ Indentation: "\t",
+ IncludePorts: true,
+ IncludeImageName: true,
+ IncludeNetworks: true,
+ })
+ require.NoError(t, err, "viz command failed")
+
+ // check indentation
+ assert.Contains(t, graphStr, "\n\t", graphStr)
+ assert.NotContains(t, graphStr, "\n\t\t", graphStr)
+
+ // check digraph name
+ assert.Contains(t, graphStr, "digraph \""+project.Name+"\"", graphStr)
+
+ // check nodes
+ for _, service := range project.Services {
+ assert.Contains(t, graphStr, "\""+service.Name+"\" [style=\"filled\"", graphStr)
+ }
+
+ // check node attributes
+ assert.Contains(t, graphStr, "Networks", graphStr)
+ assert.Contains(t, graphStr, ">internal<", graphStr)
+ assert.Contains(t, graphStr, ">external<", graphStr)
+ assert.Contains(t, graphStr, "Image", graphStr)
+ for _, service := range project.Services {
+ assert.Contains(t, graphStr, ">"+service.Image+"<", graphStr)
+ }
+ assert.Contains(t, graphStr, "Ports", graphStr)
+ for _, service := range project.Services {
+ for _, portConfig := range service.Ports {
+ assert.NotContains(t, graphStr, ">"+portConfig.Published+":"+strconv.Itoa(int(portConfig.Target))+"<", graphStr)
+ }
+ }
+ })
+}
diff --git a/pkg/compose/volumes.go b/pkg/compose/volumes.go
new file mode 100644
index 00000000000..03a12a268d8
--- /dev/null
+++ b/pkg/compose/volumes.go
@@ -0,0 +1,82 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "slices"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/volume"
+)
+
+func (s *composeService) Volumes(ctx context.Context, project string, options api.VolumesOptions) ([]api.VolumesSummary, error) {
+ allContainers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ Filters: filters.NewArgs(projectFilter(project)),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []container.Summary
+
+ if len(options.Services) > 0 {
+ // filter service containers
+ for _, c := range allContainers {
+ if slices.Contains(options.Services, c.Labels[api.ServiceLabel]) {
+ containers = append(containers, c)
+ }
+ }
+ } else {
+ containers = allContainers
+ }
+
+ volumesResponse, err := s.apiClient().VolumeList(ctx, volume.ListOptions{
+ Filters: filters.NewArgs(projectFilter(project)),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ projectVolumes := volumesResponse.Volumes
+
+ if len(options.Services) == 0 {
+ return projectVolumes, nil
+ }
+
+ var volumes []api.VolumesSummary
+
+ // create a name lookup of volumes used by containers
+ serviceVolumes := make(map[string]bool)
+
+ for _, container := range containers {
+ for _, mount := range container.Mounts {
+ serviceVolumes[mount.Name] = true
+ }
+ }
+
+ // keep only the project volumes that are actually mounted by the selected services' containers
+ for _, v := range projectVolumes {
+ if serviceVolumes[v.Name] {
+ volumes = append(volumes, v)
+ }
+ }
+
+ return volumes, nil
+}
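+
+// As a usage sketch (names are illustrative): given a project "demo" in which only
+// the "db" service mounts the volume "demo_data",
+//
+//   s.Volumes(ctx, "demo", api.VolumesOptions{})                         // every volume labeled with the project
+//   s.Volumes(ctx, "demo", api.VolumesOptions{Services: []string{"db"}}) // only "demo_data"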
diff --git a/pkg/compose/volumes_test.go b/pkg/compose/volumes_test.go
new file mode 100644
index 00000000000..d60dc2144a7
--- /dev/null
+++ b/pkg/compose/volumes_test.go
@@ -0,0 +1,87 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "testing"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/volume"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
+)
+
+func TestVolumes(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+
+ mockApi, mockCli := prepareMocks(mockCtrl)
+ tested := composeService{
+ dockerCli: mockCli,
+ }
+
+ // Create test volumes
+ vol1 := &volume.Volume{Name: testProject + "_vol1"}
+ vol2 := &volume.Volume{Name: testProject + "_vol2"}
+ vol3 := &volume.Volume{Name: testProject + "_vol3"}
+
+ // Create test containers with volume mounts
+ c1 := container.Summary{
+ Labels: map[string]string{api.ServiceLabel: "service1"},
+ Mounts: []container.MountPoint{
+ {Name: testProject + "_vol1"},
+ {Name: testProject + "_vol2"},
+ },
+ }
+ c2 := container.Summary{
+ Labels: map[string]string{api.ServiceLabel: "service2"},
+ Mounts: []container.MountPoint{
+ {Name: testProject + "_vol3"},
+ },
+ }
+
+ ctx := context.Background()
+ args := filters.NewArgs(projectFilter(testProject))
+ listOpts := container.ListOptions{Filters: args}
+ volumeListArgs := filters.NewArgs(projectFilter(testProject))
+ volumeListOpts := volume.ListOptions{Filters: volumeListArgs}
+ volumeReturn := volume.ListResponse{
+ Volumes: []*volume.Volume{vol1, vol2, vol3},
+ }
+ containerReturn := []container.Summary{c1, c2}
+
+ // Mock API calls
+ mockApi.EXPECT().ContainerList(ctx, listOpts).Times(2).Return(containerReturn, nil)
+ mockApi.EXPECT().VolumeList(ctx, volumeListOpts).Times(2).Return(volumeReturn, nil)
+
+ // Test without service filter - should return all project volumes
+ volumeOptions := api.VolumesOptions{}
+ volumes, err := tested.Volumes(ctx, testProject, volumeOptions)
+ expected := []api.VolumesSummary{vol1, vol2, vol3}
+ assert.NilError(t, err)
+ assert.DeepEqual(t, volumes, expected)
+
+ // Test with service filter - should only return volumes used by service1
+ volumeOptions = api.VolumesOptions{Services: []string{"service1"}}
+ volumes, err = tested.Volumes(ctx, testProject, volumeOptions)
+ expected = []api.VolumesSummary{vol1, vol2}
+ assert.NilError(t, err)
+ assert.DeepEqual(t, volumes, expected)
+}
diff --git a/pkg/compose/wait.go b/pkg/compose/wait.go
new file mode 100644
index 00000000000..003a1816ead
--- /dev/null
+++ b/pkg/compose/wait.go
@@ -0,0 +1,66 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/compose/v5/pkg/api"
+ "golang.org/x/sync/errgroup"
+)
+
+func (s *composeService) Wait(ctx context.Context, projectName string, options api.WaitOptions) (int64, error) {
+ containers, err := s.getContainers(ctx, projectName, oneOffInclude, false, options.Services...)
+ if err != nil {
+ return 0, err
+ }
+ if len(containers) == 0 {
+ return 0, fmt.Errorf("no containers for project %q", projectName)
+ }
+
+ eg, waitCtx := errgroup.WithContext(ctx)
+ var statusCode int64
+ for _, ctr := range containers {
+ eg.Go(func() error {
+ var err error
+ resultC, errC := s.apiClient().ContainerWait(waitCtx, ctr.ID, "")
+
+ select {
+ case result := <-resultC:
+ _, _ = fmt.Fprintf(s.stdout(), "container %q exited with status code %d\n", ctr.ID, result.StatusCode)
+ statusCode = result.StatusCode
+ case err = <-errC:
+ }
+
+ return err
+ })
+ }
+
+ err = eg.Wait()
+ if err != nil {
+ return 42, err // Ignore abort flag in case of error in wait
+ }
+
+ if options.DownProjectOnContainerExit {
+ return statusCode, s.Down(ctx, projectName, api.DownOptions{
+ RemoveOrphans: true,
+ })
+ }
+
+ return statusCode, err
+}
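+
+// As a usage sketch (values are illustrative): block until every container of the
+// project has exited, then tear the project down and propagate the exit status.
+//
+//   code, err := s.Wait(ctx, "demo", api.WaitOptions{
+//     DownProjectOnContainerExit: true,
+//   })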
diff --git a/pkg/compose/watch.go b/pkg/compose/watch.go
new file mode 100644
index 00000000000..e5625ed3813
--- /dev/null
+++ b/pkg/compose/watch.go
@@ -0,0 +1,841 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+ gsync "sync"
+ "time"
+
+ pathutil "github.com/docker/compose/v5/internal/paths"
+ "github.com/docker/compose/v5/internal/sync"
+ "github.com/docker/compose/v5/internal/tracing"
+ "github.com/docker/compose/v5/pkg/api"
+ cutils "github.com/docker/compose/v5/pkg/utils"
+ "github.com/docker/compose/v5/pkg/watch"
+ "github.com/moby/buildkit/util/progress/progressui"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/compose-spec/compose-go/v2/utils"
+ ccli "github.com/docker/cli/cli/command/container"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/go-viper/mapstructure/v2"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/errgroup"
+)
+
+type WatchFunc func(ctx context.Context, project *types.Project, options api.WatchOptions) (func() error, error)
+
+type Watcher struct {
+ project *types.Project
+ options api.WatchOptions
+ watchFn WatchFunc
+ stopFn func()
+ errCh chan error
+}
+
+func NewWatcher(project *types.Project, options api.UpOptions, w WatchFunc, consumer api.LogConsumer) (*Watcher, error) {
+ for i := range project.Services {
+ service := project.Services[i]
+
+ if service.Develop != nil && service.Develop.Watch != nil {
+ build := options.Create.Build
+ return &Watcher{
+ project: project,
+ options: api.WatchOptions{
+ LogTo: consumer,
+ Build: build,
+ },
+ watchFn: w,
+ errCh: make(chan error),
+ }, nil
+ }
+ }
+ // none of the services is eligible to watch
+ return nil, fmt.Errorf("none of the selected services is configured for watch, see https://docs.docker.com/compose/how-tos/file-watch/")
+}
+
+// ensure state changes are atomic
+var mx gsync.Mutex
+
+func (w *Watcher) Start(ctx context.Context) error {
+ mx.Lock()
+ defer mx.Unlock()
+ ctx, cancelFunc := context.WithCancel(ctx)
+ w.stopFn = cancelFunc
+ wait, err := w.watchFn(ctx, w.project, w.options)
+ if err != nil {
+ go func() {
+ w.errCh <- err
+ }()
+ return err
+ }
+ go func() {
+ w.errCh <- wait()
+ }()
+ return nil
+}
+
+func (w *Watcher) Stop() error {
+ mx.Lock()
+ defer mx.Unlock()
+ if w.stopFn == nil {
+ return nil
+ }
+ w.stopFn()
+ w.stopFn = nil
+ err := <-w.errCh
+ return err
+}
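+
+// Typical lifecycle, roughly as wired up by `Up` when watch is enabled (a sketch,
+// error handling elided):
+//
+//   watcher, err := NewWatcher(project, upOptions, s.watch, logConsumer)
+//   _ = watcher.Start(ctx) // starts watchFn and forwards its result to errCh
+//   defer watcher.Stop()   // cancels the watch context and waits for the watch loop to finish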
+
+// getSyncImplementation returns an appropriate sync implementation for the
+// project.
+//
+// Currently, this is an implementation that batches files and transfers them using
+// the Moby `Untar` API.
+func (s *composeService) getSyncImplementation(project *types.Project) (sync.Syncer, error) {
+ var useTar bool
+ if useTarEnv, ok := os.LookupEnv("COMPOSE_EXPERIMENTAL_WATCH_TAR"); ok {
+ useTar, _ = strconv.ParseBool(useTarEnv)
+ } else {
+ useTar = true
+ }
+ if !useTar {
+ return nil, errors.New("no available sync implementation")
+ }
+
+ return sync.NewTar(project.Name, tarDockerClient{s: s}), nil
+}
+
+func (s *composeService) Watch(ctx context.Context, project *types.Project, options api.WatchOptions) error {
+ wait, err := s.watch(ctx, project, options)
+ if err != nil {
+ return err
+ }
+ return wait()
+}
+
+type watchRule struct {
+ types.Trigger
+ include watch.PathMatcher
+ ignore watch.PathMatcher
+ service string
+}
+
+func (r watchRule) Matches(event watch.FileEvent) *sync.PathMapping {
+ hostPath := string(event)
+ if !pathutil.IsChild(r.Path, hostPath) {
+ return nil
+ }
+ included, err := r.include.Matches(hostPath)
+ if err != nil {
+ logrus.Warnf("error include matching %q: %v", hostPath, err)
+ return nil
+ }
+ if !included {
+ logrus.Debugf("%s is not matching include pattern", hostPath)
+ return nil
+ }
+ isIgnored, err := r.ignore.Matches(hostPath)
+ if err != nil {
+ logrus.Warnf("error ignore matching %q: %v", hostPath, err)
+ return nil
+ }
+
+ if isIgnored {
+ logrus.Debugf("%s is matching ignore pattern", hostPath)
+ return nil
+ }
+
+ var containerPath string
+ if r.Target != "" {
+ rel, err := filepath.Rel(r.Path, hostPath)
+ if err != nil {
+ logrus.Warnf("error making %s relative to %s: %v", hostPath, r.Path, err)
+ return nil
+ }
+ // always use Unix-style paths for inside the container
+ containerPath = path.Join(r.Target, filepath.ToSlash(rel))
+ }
+ return &sync.PathMapping{
+ HostPath: hostPath,
+ ContainerPath: containerPath,
+ }
+}
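+
+// For example (illustrative values): a rule with Path "/src" and Target "/app/src"
+// maps the event "/src/pkg/main.go" to
+//
+//   &sync.PathMapping{HostPath: "/src/pkg/main.go", ContainerPath: "/app/src/pkg/main.go"}
+//
+// provided the path matches the include patterns and none of the ignore patterns.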
+
+func (s *composeService) watch(ctx context.Context, project *types.Project, options api.WatchOptions) (func() error, error) { //nolint: gocyclo
+ var err error
+ if project, err = project.WithSelectedServices(options.Services); err != nil {
+ return nil, err
+ }
+ syncer, err := s.getSyncImplementation(project)
+ if err != nil {
+ return nil, err
+ }
+ eg, ctx := errgroup.WithContext(ctx)
+
+ var (
+ rules []watchRule
+ paths []string
+ )
+ for serviceName, service := range project.Services {
+ config, err := loadDevelopmentConfig(service, project)
+ if err != nil {
+ return nil, err
+ }
+
+ if service.Develop != nil {
+ config = service.Develop
+ }
+
+ if config == nil {
+ continue
+ }
+
+ for _, trigger := range config.Watch {
+ if trigger.Action == types.WatchActionRebuild {
+ if service.Build == nil {
+ return nil, fmt.Errorf("can't watch service %q with action %s without a build context", service.Name, types.WatchActionRebuild)
+ }
+ if options.Build == nil {
+ return nil, fmt.Errorf("--no-build is incompatible with watch action %s in service %s", types.WatchActionRebuild, service.Name)
+ }
+ // set the service to always be built - watch triggers `Up()` when it receives a rebuild event
+ service.PullPolicy = types.PullPolicyBuild
+ project.Services[serviceName] = service
+ }
+ }
+
+ for _, trigger := range config.Watch {
+ if isSync(trigger) && checkIfPathAlreadyBindMounted(trigger.Path, service.Volumes) {
+ logrus.Warnf("path '%s' also declared by a bind mount volume, this path won't be monitored!\n", trigger.Path)
+ continue
+ } else {
+ shouldInitialSync := trigger.InitialSync
+
+ // Check legacy extension attribute for backward compatibility
+ if !shouldInitialSync {
+ var legacyInitialSync bool
+ success, err := trigger.Extensions.Get("x-initialSync", &legacyInitialSync)
+ if err == nil && success && legacyInitialSync {
+ shouldInitialSync = true
+ logrus.Warnf("x-initialSync is DEPRECATED, please use the official `initial_sync` attribute\n")
+ }
+ }
+
+ if shouldInitialSync && isSync(trigger) {
+ // make sure files that the watch action is meant to sync are already present in the container
+ err := s.initialSync(ctx, project, service, trigger, syncer)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ paths = append(paths, trigger.Path)
+ }
+
+ serviceWatchRules, err := getWatchRules(config, service)
+ if err != nil {
+ return nil, err
+ }
+ rules = append(rules, serviceWatchRules...)
+ }
+
+ if len(paths) == 0 {
+ return nil, fmt.Errorf("none of the selected services is configured for watch, consider setting a 'develop' section")
+ }
+
+ watcher, err := watch.NewWatcher(paths)
+ if err != nil {
+ return nil, err
+ }
+
+ err = watcher.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ eg.Go(func() error {
+ return s.watchEvents(ctx, project, options, watcher, syncer, rules)
+ })
+ options.LogTo.Log(api.WatchLogger, "Watch enabled")
+
+ return func() error {
+ err := eg.Wait()
+ if werr := watcher.Close(); werr != nil {
+ logrus.Debugf("Error closing Watcher: %v", werr)
+ }
+ return err
+ }, nil
+}
+
+func getWatchRules(config *types.DevelopConfig, service types.ServiceConfig) ([]watchRule, error) {
+ var rules []watchRule
+
+ dockerIgnores, err := watch.LoadDockerIgnore(service.Build)
+ if err != nil {
+ return nil, err
+ }
+
+ // add a hardcoded set of ignores on top of what came from .dockerignore
+ // some of this should likely be configurable (e.g. there could be cases
+ // where you want `.git` to be synced) but this is suitable for now
+ dotGitIgnore, err := watch.NewDockerPatternMatcher("/", []string{".git/"})
+ if err != nil {
+ return nil, err
+ }
+
+ for _, trigger := range config.Watch {
+ ignore, err := watch.NewDockerPatternMatcher(trigger.Path, trigger.Ignore)
+ if err != nil {
+ return nil, err
+ }
+
+ var include watch.PathMatcher
+ if len(trigger.Include) == 0 {
+ include = watch.AnyMatcher{}
+ } else {
+ include, err = watch.NewDockerPatternMatcher(trigger.Path, trigger.Include)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ rules = append(rules, watchRule{
+ Trigger: trigger,
+ include: include,
+ ignore: watch.NewCompositeMatcher(
+ dockerIgnores,
+ watch.EphemeralPathMatcher(),
+ dotGitIgnore,
+ ignore,
+ ),
+ service: service.Name,
+ })
+ }
+ return rules, nil
+}
+
+func isSync(trigger types.Trigger) bool {
+ return trigger.Action == types.WatchActionSync || trigger.Action == types.WatchActionSyncRestart
+}
+
+func (s *composeService) watchEvents(ctx context.Context, project *types.Project, options api.WatchOptions, watcher watch.Notify, syncer sync.Syncer, rules []watchRule) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // debounce and group filesystem events so that an IDE saving many files is captured as a single "batch" event
+ batchEvents := watch.BatchDebounceEvents(ctx, s.clock, watcher.Events())
+
+ for {
+ select {
+ case <-ctx.Done():
+ options.LogTo.Log(api.WatchLogger, "Watch disabled")
+ return nil
+ case err, open := <-watcher.Errors():
+ if err != nil {
+ options.LogTo.Err(api.WatchLogger, "Watch disabled with errors: "+err.Error())
+ }
+ if open {
+ continue
+ }
+ return err
+ case batch := <-batchEvents:
+ start := time.Now()
+ logrus.Debugf("batch start: count[%d]", len(batch))
+ err := s.handleWatchBatch(ctx, project, options, batch, rules, syncer)
+ if err != nil {
+ logrus.Warnf("Error handling changed files: %v", err)
+ }
+ logrus.Debugf("batch complete: duration[%s] count[%d]", time.Since(start), len(batch))
+ }
+ }
+}
+
+func loadDevelopmentConfig(service types.ServiceConfig, project *types.Project) (*types.DevelopConfig, error) {
+ var config types.DevelopConfig
+ y, ok := service.Extensions["x-develop"]
+ if !ok {
+ return nil, nil
+ }
+ logrus.Warnf("x-develop is DEPRECATED, please use the official `develop` attribute")
+ err := mapstructure.Decode(y, &config)
+ if err != nil {
+ return nil, err
+ }
+ baseDir, err := filepath.EvalSymlinks(project.WorkingDir)
+ if err != nil {
+ return nil, fmt.Errorf("resolving symlink for %q: %w", project.WorkingDir, err)
+ }
+
+ for i, trigger := range config.Watch {
+ if !filepath.IsAbs(trigger.Path) {
+ trigger.Path = filepath.Join(baseDir, trigger.Path)
+ }
+ if p, err := filepath.EvalSymlinks(trigger.Path); err == nil {
+ // this might fail because the path doesn't exist, etc.
+ trigger.Path = p
+ }
+ trigger.Path = filepath.Clean(trigger.Path)
+ if trigger.Path == "" {
+ return nil, errors.New("watch rules MUST define a path")
+ }
+
+ if trigger.Action == types.WatchActionRebuild && service.Build == nil {
+ return nil, fmt.Errorf("service %s doesn't have a build section, can't apply %s on watch", types.WatchActionRebuild, service.Name)
+ }
+ if trigger.Action == types.WatchActionSyncExec && len(trigger.Exec.Command) == 0 {
+ return nil, fmt.Errorf("can't watch with action %q on service %s without a command", types.WatchActionSyncExec, service.Name)
+ }
+
+ config.Watch[i] = trigger
+ }
+ return &config, nil
+}
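+
+// The deprecated `x-develop` extension mirrors the official `develop` attribute.
+// A minimal example of the shape this function decodes (illustrative):
+//
+//   services:
+//     web:
+//       x-develop:
+//         watch:
+//           - path: ./src
+//             action: sync
+//             target: /app/src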
+
+func checkIfPathAlreadyBindMounted(watchPath string, volumes []types.ServiceVolumeConfig) bool {
+ for _, volume := range volumes {
+ if volume.Bind != nil {
+ relPath, err := filepath.Rel(volume.Source, watchPath)
+ if err == nil && !strings.HasPrefix(relPath, "..") {
+ return true
+ }
+ }
+ }
+ return false
+}
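+
+// For example (illustrative values): if a service bind mounts "/work/src" into the
+// container, checkIfPathAlreadyBindMounted("/work/src/app", service.Volumes) returns
+// true, so watch skips monitoring a path the bind mount already keeps up to date.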
+
+type tarDockerClient struct {
+ s *composeService
+}
+
+func (t tarDockerClient) ContainersForService(ctx context.Context, projectName string, serviceName string) ([]container.Summary, error) {
+ containers, err := t.s.getContainers(ctx, projectName, oneOffExclude, true, serviceName)
+ if err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+func (t tarDockerClient) Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error {
+ execCfg := container.ExecOptions{
+ Cmd: cmd,
+ AttachStdout: false,
+ AttachStderr: true,
+ AttachStdin: in != nil,
+ Tty: false,
+ }
+ execCreateResp, err := t.s.apiClient().ContainerExecCreate(ctx, containerID, execCfg)
+ if err != nil {
+ return err
+ }
+
+ startCheck := container.ExecStartOptions{Tty: false, Detach: false}
+ conn, err := t.s.apiClient().ContainerExecAttach(ctx, execCreateResp.ID, startCheck)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ var eg errgroup.Group
+ if in != nil {
+ eg.Go(func() error {
+ defer func() {
+ _ = conn.CloseWrite()
+ }()
+ _, err := io.Copy(conn.Conn, in)
+ return err
+ })
+ }
+ eg.Go(func() error {
+ _, err := io.Copy(t.s.stdout(), conn.Reader)
+ return err
+ })
+
+ err = t.s.apiClient().ContainerExecStart(ctx, execCreateResp.ID, startCheck)
+ if err != nil {
+ return err
+ }
+
+ // although the errgroup is not tied directly to the context, the operations
+ // in it are reading/writing to the connection, which is tied to the context,
+ // so they won't block indefinitely
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+
+ execResult, err := t.s.apiClient().ContainerExecInspect(ctx, execCreateResp.ID)
+ if err != nil {
+ return err
+ }
+ if execResult.Running {
+ return errors.New("process still running")
+ }
+ if execResult.ExitCode != 0 {
+ return fmt.Errorf("exit code %d", execResult.ExitCode)
+ }
+ return nil
+}
+
+func (t tarDockerClient) Untar(ctx context.Context, id string, archive io.ReadCloser) error {
+ return t.s.apiClient().CopyToContainer(ctx, id, "/", archive, container.CopyToContainerOptions{
+ CopyUIDGID: true,
+ })
+}
+
+//nolint:gocyclo
+func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, options api.WatchOptions, batch []watch.FileEvent, rules []watchRule, syncer sync.Syncer) error {
+ var (
+ restart = map[string]bool{}
+ syncfiles = map[string][]*sync.PathMapping{}
+ exec = map[string][]int{}
+ rebuild = map[string]bool{}
+ )
+ for _, event := range batch {
+ for i, rule := range rules {
+ mapping := rule.Matches(event)
+ if mapping == nil {
+ continue
+ }
+
+ switch rule.Action {
+ case types.WatchActionRebuild:
+ rebuild[rule.service] = true
+ case types.WatchActionSync:
+ syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
+ case types.WatchActionRestart:
+ restart[rule.service] = true
+ case types.WatchActionSyncRestart:
+ syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
+ restart[rule.service] = true
+ case types.WatchActionSyncExec:
+ syncfiles[rule.service] = append(syncfiles[rule.service], mapping)
+ // We want to run exec hooks only once after sync, even if multiple file events match.
+ // ServiceHook values can't be compared to sort and compact a slice, so collect rule indexes instead
+ exec[rule.service] = append(exec[rule.service], i)
+ }
+ }
+ }
+
+ logrus.Debugf("watch actions: rebuild %d sync %d restart %d", len(rebuild), len(syncfiles), len(restart))
+
+ if len(rebuild) > 0 {
+ err := s.rebuild(ctx, project, utils.MapKeys(rebuild), options)
+ if err != nil {
+ return err
+ }
+ }
+
+ for serviceName, pathMappings := range syncfiles {
+ writeWatchSyncMessage(options.LogTo, serviceName, pathMappings)
+ err := syncer.Sync(ctx, serviceName, pathMappings)
+ if err != nil {
+ return err
+ }
+ }
+ if len(restart) > 0 {
+ services := utils.MapKeys(restart)
+ err := s.restart(ctx, project.Name, api.RestartOptions{
+ Services: services,
+ Project: project,
+ NoDeps: false,
+ })
+ if err != nil {
+ return err
+ }
+ options.LogTo.Log(
+ api.WatchLogger,
+ fmt.Sprintf("service(s) %q restarted", services))
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ for service, rulesToExec := range exec {
+ slices.Sort(rulesToExec)
+ for _, i := range slices.Compact(rulesToExec) {
+ err := s.exec(ctx, project, service, rules[i].Exec, eg)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return eg.Wait()
+}
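+
+// For example (illustrative): a batch containing "/src/app.go" (matching a sync rule)
+// and "/src/Dockerfile" (matching a rebuild rule) results in one rebuild of the
+// affected service and one sync of "/src/app.go", plus a restart and/or exec hook
+// run if matching rules request them.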
+
+func (s *composeService) exec(ctx context.Context, project *types.Project, serviceName string, x types.ServiceHook, eg *errgroup.Group) error {
+ containers, err := s.getContainers(ctx, project.Name, oneOffExclude, false, serviceName)
+ if err != nil {
+ return err
+ }
+ for _, c := range containers {
+ eg.Go(func() error {
+ exec := ccli.NewExecOptions()
+ exec.User = x.User
+ exec.Privileged = x.Privileged
+ exec.Command = x.Command
+ exec.Workdir = x.WorkingDir
+ exec.DetachKeys = s.configFile().DetachKeys
+ for _, v := range x.Environment.ToMapping().Values() {
+ err := exec.Env.Set(v)
+ if err != nil {
+ return err
+ }
+ }
+ return ccli.RunExec(ctx, s.dockerCli, c.ID, exec)
+ })
+ }
+ return nil
+}
+
+func (s *composeService) rebuild(ctx context.Context, project *types.Project, services []string, options api.WatchOptions) error {
+ options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Rebuilding service(s) %q after changes were detected...", services))
+ // restrict the build to ONLY this service, not any of its dependencies
+ options.Build.Services = services
+ options.Build.Progress = string(progressui.PlainMode)
+ options.Build.Out = cutils.GetWriter(func(line string) {
+ options.LogTo.Log(api.WatchLogger, line)
+ })
+
+ var (
+ imageNameToIdMap map[string]string
+ err error
+ )
+ err = tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(ctx, project),
+ func(ctx context.Context) error {
+ imageNameToIdMap, err = s.build(ctx, project, *options.Build, nil)
+ return err
+ })(ctx)
+ if err != nil {
+ options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Build failed. Error: %v", err))
+ return err
+ }
+
+ if options.Prune {
+ s.pruneDanglingImagesOnRebuild(ctx, project.Name, imageNameToIdMap)
+ }
+
+ options.LogTo.Log(api.WatchLogger, fmt.Sprintf("service(s) %q successfully built", services))
+
+ err = s.create(ctx, project, api.CreateOptions{
+ Services: services,
+ Inherit: true,
+ Recreate: api.RecreateForce,
+ })
+ if err != nil {
+ options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Failed to recreate services after update. Error: %v", err))
+ return err
+ }
+
+ p, err := project.WithSelectedServices(services, types.IncludeDependents)
+ if err != nil {
+ return err
+ }
+ err = s.start(ctx, project.Name, api.StartOptions{
+ Project: p,
+ Services: services,
+ AttachTo: services,
+ }, nil)
+ if err != nil {
+ options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Application failed to start after update. Error: %v", err))
+ }
+ return nil
+}
+
+// writeWatchSyncMessage prints out a message about the sync for the changed paths.
+func writeWatchSyncMessage(log api.LogConsumer, serviceName string, pathMappings []*sync.PathMapping) {
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ hostPathsToSync := make([]string, len(pathMappings))
+ for i := range pathMappings {
+ hostPathsToSync[i] = pathMappings[i].HostPath
+ }
+ log.Log(
+ api.WatchLogger,
+ fmt.Sprintf(
+ "Syncing service %q after changes were detected: %s",
+ serviceName,
+ strings.Join(hostPathsToSync, ", "),
+ ),
+ )
+ } else {
+ log.Log(
+ api.WatchLogger,
+ fmt.Sprintf("Syncing service %q after %d changes were detected", serviceName, len(pathMappings)),
+ )
+ }
+}
+
+func (s *composeService) pruneDanglingImagesOnRebuild(ctx context.Context, projectName string, imageNameToIdMap map[string]string) {
+ images, err := s.apiClient().ImageList(ctx, image.ListOptions{
+ Filters: filters.NewArgs(
+ filters.Arg("dangling", "true"),
+ filters.Arg("label", api.ProjectLabel+"="+projectName),
+ ),
+ })
+ if err != nil {
+ logrus.Debugf("Failed to list images: %v", err)
+ return
+ }
+
+ for _, img := range images {
+ if _, ok := imageNameToIdMap[img.ID]; !ok {
+ _, err := s.apiClient().ImageRemove(ctx, img.ID, image.RemoveOptions{})
+ if err != nil {
+ logrus.Debugf("Failed to remove image %s: %v", img.ID, err)
+ }
+ }
+ }
+}
+
+// initialSync walks develop.watch.path and copies the relevant files into the container,
+// ignoring develop.watch.ignore patterns, Dockerfile, compose files, bind-mounted paths and .git
+func (s *composeService) initialSync(ctx context.Context, project *types.Project, service types.ServiceConfig, trigger types.Trigger, syncer sync.Syncer) error {
+ dockerIgnores, err := watch.LoadDockerIgnore(service.Build)
+ if err != nil {
+ return err
+ }
+
+ dotGitIgnore, err := watch.NewDockerPatternMatcher("/", []string{".git/"})
+ if err != nil {
+ return err
+ }
+
+ triggerIgnore, err := watch.NewDockerPatternMatcher(trigger.Path, trigger.Ignore)
+ if err != nil {
+ return err
+ }
+ // FIXME .dockerignore
+ ignoreInitialSync := watch.NewCompositeMatcher(
+ dockerIgnores,
+ watch.EphemeralPathMatcher(),
+ dotGitIgnore,
+ triggerIgnore)
+
+ pathsToCopy, err := s.initialSyncFiles(ctx, project, service, trigger, ignoreInitialSync)
+ if err != nil {
+ return err
+ }
+
+ return syncer.Sync(ctx, service.Name, pathsToCopy)
+}
+
+// initialSyncFiles returns the files from develop.watch.path that have been modified after the image was created
+//
+//nolint:gocyclo
+func (s *composeService) initialSyncFiles(ctx context.Context, project *types.Project, service types.ServiceConfig, trigger types.Trigger, ignore watch.PathMatcher) ([]*sync.PathMapping, error) {
+ fi, err := os.Stat(trigger.Path)
+ if err != nil {
+ return nil, err
+ }
+ timeImageCreated, err := s.imageCreatedTime(ctx, project, service.Name)
+ if err != nil {
+ return nil, err
+ }
+ var pathsToCopy []*sync.PathMapping
+ switch mode := fi.Mode(); {
+ case mode.IsDir():
+ // process directory
+ err = filepath.WalkDir(trigger.Path, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ // handle possible path err, just in case...
+ return err
+ }
+ if trigger.Path == path {
+ // walk starts at the root directory
+ return nil
+ }
+ if shouldIgnore(filepath.Base(path), ignore) || checkIfPathAlreadyBindMounted(path, service.Volumes) {
+ // By definition sync ignores bind mounted paths
+ if d.IsDir() {
+ // skip folder
+ return fs.SkipDir
+ }
+ return nil // skip file
+ }
+ info, err := d.Info()
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ if info.ModTime().Before(timeImageCreated) {
+ // skip file if it was modified before image creation
+ return nil
+ }
+ rel, err := filepath.Rel(trigger.Path, path)
+ if err != nil {
+ return err
+ }
+ // only copy files (and not full directories)
+ pathsToCopy = append(pathsToCopy, &sync.PathMapping{
+ HostPath: path,
+ ContainerPath: filepath.Join(trigger.Target, rel),
+ })
+ }
+ return nil
+ })
+ case mode.IsRegular():
+ // process file
+ if fi.ModTime().After(timeImageCreated) && !shouldIgnore(filepath.Base(trigger.Path), ignore) && !checkIfPathAlreadyBindMounted(trigger.Path, service.Volumes) {
+ pathsToCopy = append(pathsToCopy, &sync.PathMapping{
+ HostPath: trigger.Path,
+ ContainerPath: trigger.Target,
+ })
+ }
+ }
+ return pathsToCopy, err
+}
+
+func shouldIgnore(name string, ignore watch.PathMatcher) bool {
+ shouldIgnore, _ := ignore.Matches(name)
+ // ignore files that match any ignore pattern
+ return shouldIgnore
+}
+
+// gets the image creation time for a service
+func (s *composeService) imageCreatedTime(ctx context.Context, project *types.Project, serviceName string) (time.Time, error) {
+ containers, err := s.apiClient().ContainerList(ctx, container.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(
+ filters.Arg("label", fmt.Sprintf("%s=%s", api.ProjectLabel, project.Name)),
+ filters.Arg("label", fmt.Sprintf("%s=%s", api.ServiceLabel, serviceName))),
+ })
+ if err != nil {
+ return time.Now(), err
+ }
+ if len(containers) == 0 {
+ return time.Now(), fmt.Errorf("could not get created time for service's image")
+ }
+
+ img, err := s.apiClient().ImageInspect(ctx, containers[0].ImageID)
+ if err != nil {
+ return time.Now(), err
+ }
+ // Need to get the oldest one?
+ timeCreated, err := time.Parse(time.RFC3339Nano, img.Created)
+ if err != nil {
+ return time.Now(), err
+ }
+ return timeCreated, nil
+}
diff --git a/pkg/compose/watch_test.go b/pkg/compose/watch_test.go
new file mode 100644
index 00000000000..d5d8cd081e4
--- /dev/null
+++ b/pkg/compose/watch_test.go
@@ -0,0 +1,187 @@
+/*
+
+ Copyright 2020 Docker Compose CLI authors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package compose
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/streams"
+ "github.com/docker/compose/v5/internal/sync"
+ "github.com/docker/compose/v5/pkg/api"
+ "github.com/docker/compose/v5/pkg/mocks"
+ "github.com/docker/compose/v5/pkg/watch"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/jonboulle/clockwork"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+ "gotest.tools/v3/assert"
+)
+
+type testWatcher struct {
+ events chan watch.FileEvent
+ errors chan error
+}
+
+func (t testWatcher) Start() error {
+ return nil
+}
+
+func (t testWatcher) Close() error {
+ return nil
+}
+
+func (t testWatcher) Events() chan watch.FileEvent {
+ return t.events
+}
+
+func (t testWatcher) Errors() chan error {
+ return t.errors
+}
+
+type stdLogger struct{}
+
+func (s stdLogger) Log(containerName, message string) {
+ fmt.Printf("%s: %s\n", containerName, message)
+}
+
+func (s stdLogger) Err(containerName, message string) {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", containerName, message)
+}
+
+func (s stdLogger) Status(containerName, msg string) {
+ fmt.Printf("%s: %s\n", containerName, msg)
+}
+
+func TestWatch_Sync(t *testing.T) {
+ mockCtrl := gomock.NewController(t)
+ cli := mocks.NewMockCli(mockCtrl)
+ cli.EXPECT().Err().Return(streams.NewOut(os.Stderr)).AnyTimes()
+ apiClient := mocks.NewMockAPIClient(mockCtrl)
+ apiClient.EXPECT().ContainerList(gomock.Any(), gomock.Any()).Return([]container.Summary{
+ testContainer("test", "123", false),
+ }, nil).AnyTimes()
+ // we expect the image to be pruned
+ apiClient.EXPECT().ImageList(gomock.Any(), image.ListOptions{
+ Filters: filters.NewArgs(
+ filters.Arg("dangling", "true"),
+ filters.Arg("label", api.ProjectLabel+"=myProjectName"),
+ ),
+ }).Return([]image.Summary{
+ {ID: "123"},
+ {ID: "456"},
+ }, nil).Times(1)
+ apiClient.EXPECT().ImageRemove(gomock.Any(), "123", image.RemoveOptions{}).Times(1)
+ apiClient.EXPECT().ImageRemove(gomock.Any(), "456", image.RemoveOptions{}).Times(1)
+ //
+ cli.EXPECT().Client().Return(apiClient).AnyTimes()
+
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ t.Cleanup(cancelFunc)
+
+ proj := types.Project{
+ Name: "myProjectName",
+ Services: types.Services{
+ "test": {
+ Name: "test",
+ },
+ },
+ }
+
+ watcher := testWatcher{
+ events: make(chan watch.FileEvent),
+ errors: make(chan error),
+ }
+
+ syncer := newFakeSyncer()
+ clock := clockwork.NewFakeClock()
+ go func() {
+ service := composeService{
+ dockerCli: cli,
+ clock: clock,
+ }
+ rules, err := getWatchRules(&types.DevelopConfig{
+ Watch: []types.Trigger{
+ {
+ Path: "/sync",
+ Action: "sync",
+ Target: "/work",
+ Ignore: []string{"ignore"},
+ },
+ {
+ Path: "/rebuild",
+ Action: "rebuild",
+ },
+ },
+ }, types.ServiceConfig{Name: "test"})
+ assert.NilError(t, err)
+
+ err = service.watchEvents(ctx, &proj, api.WatchOptions{
+ Build: &api.BuildOptions{},
+ LogTo: stdLogger{},
+ Prune: true,
+ }, watcher, syncer, rules)
+ assert.NilError(t, err)
+ }()
+
+ watcher.Events() <- watch.NewFileEvent("/sync/changed")
+ watcher.Events() <- watch.NewFileEvent("/sync/changed/sub")
+ err := clock.BlockUntilContext(ctx, 3)
+ assert.NilError(t, err)
+ clock.Advance(watch.QuietPeriod)
+ select {
+ case actual := <-syncer.synced:
+ require.ElementsMatch(t, []*sync.PathMapping{
+ {HostPath: "/sync/changed", ContainerPath: "/work/changed"},
+ {HostPath: "/sync/changed/sub", ContainerPath: "/work/changed/sub"},
+ }, actual)
+ case <-time.After(100 * time.Millisecond):
+ t.Error("timeout")
+ }
+
+ watcher.Events() <- watch.NewFileEvent("/rebuild")
+ watcher.Events() <- watch.NewFileEvent("/sync/changed")
+ err = clock.BlockUntilContext(ctx, 4)
+ assert.NilError(t, err)
+ clock.Advance(watch.QuietPeriod)
+ select {
+ case batch := <-syncer.synced:
+ t.Fatalf("received unexpected events: %v", batch)
+ case <-time.After(100 * time.Millisecond):
+ // expected
+ }
+ // TODO: there's not a great way to assert that the rebuild attempt happened
+}
+
+type fakeSyncer struct {
+ synced chan []*sync.PathMapping
+}
+
+func newFakeSyncer() *fakeSyncer {
+ return &fakeSyncer{
+ synced: make(chan []*sync.PathMapping),
+ }
+}
+
+func (f *fakeSyncer) Sync(ctx context.Context, service string, paths []*sync.PathMapping) error {
+ f.synced <- paths
+ return nil
+}
diff --git a/pkg/dryrun/dryrunclient.go b/pkg/dryrun/dryrunclient.go
new file mode 100644
index 00000000000..6f393cac8fd
--- /dev/null
+++ b/pkg/dryrun/dryrunclient.go
@@ -0,0 +1,692 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package dryrun
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/cli/cli/command"
+ moby "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/build"
+ "github.com/docker/docker/api/types/checkpoint"
+ containerType "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/api/types/system"
+ "github.com/docker/docker/api/types/volume"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/pkg/jsonmessage"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var _ client.APIClient = &DryRunClient{}
+
+// DryRunClient implements client.APIClient: read operations are delegated to the real API client, while anything that would mutate state is stubbed out for dry-run mode
+type DryRunClient struct {
+ apiClient client.APIClient
+ containers []containerType.Summary
+ execs sync.Map
+ resolver *imagetools.Resolver
+}
+
+type execDetails struct {
+ container string
+ command []string
+}
+
+// NewDryRunClient produces a DryRunClient
+func NewDryRunClient(apiClient client.APIClient, cli command.Cli) (*DryRunClient, error) {
+ b, err := builder.New(cli, builder.WithSkippedValidation())
+ if err != nil {
+ return nil, err
+ }
+ configFile, err := b.ImageOpt()
+ if err != nil {
+ return nil, err
+ }
+ return &DryRunClient{
+ apiClient: apiClient,
+ containers: []containerType.Summary{},
+ execs: sync.Map{},
+ resolver: imagetools.New(configFile),
+ }, nil
+}
+
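+// getCallingFunction relies on runtime.Caller(2): frame 0 is this function, frame 1
+// is the DryRunClient method invoking it, and frame 2 is the composeService method
+// driving the API call (e.g. "start" or "getContainers"), which is what the dry-run
+// overrides use to decide how to answer.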
+func getCallingFunction() string {
+ pc, _, _, _ := runtime.Caller(2)
+ fullName := runtime.FuncForPC(pc).Name()
+ return fullName[strings.LastIndex(fullName, ".")+1:]
+}
+
+// All methods below are overridden for dry-run mode.
+
+func (d *DryRunClient) ContainerAttach(ctx context.Context, container string, options containerType.AttachOptions) (moby.HijackedResponse, error) {
+ return moby.HijackedResponse{}, errors.New("interactive run is not supported in dry-run mode")
+}
+
+func (d *DryRunClient) ContainerCreate(ctx context.Context, config *containerType.Config, hostConfig *containerType.HostConfig,
+ networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string,
+) (containerType.CreateResponse, error) {
+ d.containers = append(d.containers, containerType.Summary{
+ ID: containerName,
+ Names: []string{containerName},
+ Labels: config.Labels,
+ HostConfig: struct {
+ NetworkMode string `json:",omitempty"`
+ Annotations map[string]string `json:",omitempty"`
+ }{},
+ })
+ return containerType.CreateResponse{ID: containerName}, nil
+}
+
+func (d *DryRunClient) ContainerInspect(ctx context.Context, container string) (containerType.InspectResponse, error) {
+ containerJSON, err := d.apiClient.ContainerInspect(ctx, container)
+ if err != nil {
+ id := "dryRunId"
+ for _, c := range d.containers {
+ if c.ID == container {
+ id = container
+ }
+ }
+ return containerType.InspectResponse{
+ ContainerJSONBase: &containerType.ContainerJSONBase{
+ ID: id,
+ Name: container,
+ State: &containerType.State{
+ Status: containerType.StateRunning, // needed for --wait option
+ Health: &containerType.Health{
+ Status: containerType.Healthy, // needed for healthcheck control
+ },
+ },
+ },
+ Mounts: nil,
+ Config: &containerType.Config{},
+ NetworkSettings: &containerType.NetworkSettings{},
+ }, nil
+ }
+ return containerJSON, err
+}
+
+func (d *DryRunClient) ContainerKill(ctx context.Context, container, signal string) error {
+ return nil
+}
+
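+// ContainerList returns the recorded dry-run containers when called from Compose's start path,
+// lazily seeds them from the engine for getContainers, and otherwise delegates to the real engine.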
+func (d *DryRunClient) ContainerList(ctx context.Context, options containerType.ListOptions) ([]containerType.Summary, error) {
+ caller := getCallingFunction()
+ switch caller {
+ case "start":
+ return d.containers, nil
+ case "getContainers":
+ if len(d.containers) == 0 {
+ var err error
+ d.containers, err = d.apiClient.ContainerList(ctx, options)
+ return d.containers, err
+ }
+ }
+ return d.apiClient.ContainerList(ctx, options)
+}
+
+func (d *DryRunClient) ContainerPause(ctx context.Context, container string) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerRemove(ctx context.Context, container string, options containerType.RemoveOptions) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerRename(ctx context.Context, container, newContainerName string) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerRestart(ctx context.Context, container string, options containerType.StopOptions) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerStart(ctx context.Context, container string, options containerType.StartOptions) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerStop(ctx context.Context, container string, options containerType.StopOptions) error {
+ return nil
+}
+
+func (d *DryRunClient) ContainerUnpause(ctx context.Context, container string) error {
+ return nil
+}
+
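+// CopyFromContainer checks that the source path exists in the container but returns an empty reader instead of its content.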
+func (d *DryRunClient) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, containerType.PathStat, error) {
+ rc := io.NopCloser(strings.NewReader(""))
+ if _, err := d.ContainerStatPath(ctx, container, srcPath); err != nil {
+ return rc, containerType.PathStat{}, fmt.Errorf("could not find the file %s in container %s", srcPath, container)
+ }
+ return rc, containerType.PathStat{}, nil
+}
+
+func (d *DryRunClient) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options containerType.CopyToContainerOptions) error {
+ return nil
+}
+
+func (d *DryRunClient) ImageBuild(ctx context.Context, reader io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) {
+ rc := io.NopCloser(bytes.NewReader(nil))
+
+ return build.ImageBuildResponse{
+ Body: rc,
+ }, nil
+}
+
+func (d *DryRunClient) ImageInspect(ctx context.Context, imageName string, options ...client.ImageInspectOption) (image.InspectResponse, error) {
+ caller := getCallingFunction()
+ switch caller {
+ case "pullServiceImage", "buildContainerVolumes":
+ return image.InspectResponse{ID: "dryRunId"}, nil
+ default:
+ return d.apiClient.ImageInspect(ctx, imageName, options...)
+ }
+}
+
+// Deprecated: Use [DryRunClient.ImageInspect] instead; raw response can be obtained by [client.ImageInspectWithRawResponse] option.
+func (d *DryRunClient) ImageInspectWithRaw(ctx context.Context, imageName string) (image.InspectResponse, []byte, error) {
+ var buf bytes.Buffer
+ resp, err := d.ImageInspect(ctx, imageName, client.ImageInspectWithRawResponse(&buf))
+ if err != nil {
+ return image.InspectResponse{}, nil, err
+ }
+ return resp, buf.Bytes(), err
+}
+
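+// ImagePull only resolves the reference against the registry to check it exists; no layers are downloaded.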
+func (d *DryRunClient) ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) {
+ if _, _, err := d.resolver.Resolve(ctx, ref); err != nil {
+ return nil, err
+ }
+ rc := io.NopCloser(strings.NewReader(""))
+ return rc, nil
+}
+
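+// ImagePush validates the reference against the registry and returns a synthetic "Pushed" progress message; nothing is uploaded.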
+func (d *DryRunClient) ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) {
+ if _, _, err := d.resolver.Resolve(ctx, ref); err != nil {
+ return nil, err
+ }
+ jsonMessage, err := json.Marshal(&jsonmessage.JSONMessage{
+ Status: "Pushed",
+ Progress: &jsonmessage.JSONProgress{
+ Current: 100,
+ Total: 100,
+ Start: 0,
+ HideCounts: false,
+ Units: "Mb",
+ },
+ ID: ref,
+ })
+ if err != nil {
+ return nil, err
+ }
+ rc := io.NopCloser(bytes.NewReader(jsonMessage))
+ return rc, nil
+}
+
+func (d *DryRunClient) ImageRemove(ctx context.Context, imageName string, options image.RemoveOptions) ([]image.DeleteResponse, error) {
+ return nil, nil
+}
+
+func (d *DryRunClient) NetworkConnect(ctx context.Context, networkName, container string, config *network.EndpointSettings) error {
+ return nil
+}
+
+func (d *DryRunClient) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) {
+ return network.CreateResponse{
+ ID: name,
+ Warning: "",
+ }, nil
+}
+
+func (d *DryRunClient) NetworkDisconnect(ctx context.Context, networkName, container string, force bool) error {
+ return nil
+}
+
+func (d *DryRunClient) NetworkRemove(ctx context.Context, networkName string) error {
+ return nil
+}
+
+func (d *DryRunClient) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) {
+ return volume.Volume{
+ ClusterVolume: nil,
+ Driver: options.Driver,
+ Labels: options.Labels,
+ Name: options.Name,
+ Options: options.DriverOpts,
+ }, nil
+}
+
+func (d *DryRunClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+ return nil
+}
+
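+// ContainerExecCreate fabricates a random exec ID and remembers the requested command so ContainerExecStart can validate it later.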
+func (d *DryRunClient) ContainerExecCreate(ctx context.Context, container string, config containerType.ExecOptions) (containerType.ExecCreateResponse, error) {
+ b := make([]byte, 32)
+ _, _ = rand.Read(b)
+ id := fmt.Sprintf("%x", b)
+ d.execs.Store(id, execDetails{
+ container: container,
+ command: config.Cmd,
+ })
+ return containerType.ExecCreateResponse{
+ ID: id,
+ }, nil
+}
+
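+// ContainerExecStart only accepts exec IDs previously created by ContainerExecCreate and removes them once started.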
+func (d *DryRunClient) ContainerExecStart(ctx context.Context, execID string, config containerType.ExecStartOptions) error {
+ _, ok := d.execs.LoadAndDelete(execID)
+ if !ok {
+ return fmt.Errorf("invalid exec ID %q", execID)
+ }
+ return nil
+}
+
+// Functions delegated to the original APIClient (not used by Compose or not modifying the Compose stack).
+
+func (d *DryRunClient) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) {
+ return d.apiClient.ConfigList(ctx, options)
+}
+
+func (d *DryRunClient) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) {
+ return d.apiClient.ConfigCreate(ctx, config)
+}
+
+func (d *DryRunClient) ConfigRemove(ctx context.Context, id string) error {
+ return d.apiClient.ConfigRemove(ctx, id)
+}
+
+func (d *DryRunClient) ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) {
+ return d.apiClient.ConfigInspectWithRaw(ctx, name)
+}
+
+func (d *DryRunClient) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+ return d.apiClient.ConfigUpdate(ctx, id, version, config)
+}
+
+func (d *DryRunClient) ContainerCommit(ctx context.Context, container string, options containerType.CommitOptions) (containerType.CommitResponse, error) {
+ return d.apiClient.ContainerCommit(ctx, container, options)
+}
+
+func (d *DryRunClient) ContainerDiff(ctx context.Context, container string) ([]containerType.FilesystemChange, error) {
+ return d.apiClient.ContainerDiff(ctx, container)
+}
+
+func (d *DryRunClient) ContainerExecAttach(ctx context.Context, execID string, config containerType.ExecStartOptions) (moby.HijackedResponse, error) {
+ return moby.HijackedResponse{}, errors.New("interactive exec is not supported in dry-run mode")
+}
+
+func (d *DryRunClient) ContainerExecInspect(ctx context.Context, execID string) (containerType.ExecInspect, error) {
+ return d.apiClient.ContainerExecInspect(ctx, execID)
+}
+
+func (d *DryRunClient) ContainerExecResize(ctx context.Context, execID string, options containerType.ResizeOptions) error {
+ return d.apiClient.ContainerExecResize(ctx, execID, options)
+}
+
+func (d *DryRunClient) ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) {
+ return d.apiClient.ContainerExport(ctx, container)
+}
+
+func (d *DryRunClient) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (containerType.InspectResponse, []byte, error) {
+ return d.apiClient.ContainerInspectWithRaw(ctx, container, getSize)
+}
+
+func (d *DryRunClient) ContainerLogs(ctx context.Context, container string, options containerType.LogsOptions) (io.ReadCloser, error) {
+ return d.apiClient.ContainerLogs(ctx, container, options)
+}
+
+func (d *DryRunClient) ContainerResize(ctx context.Context, container string, options containerType.ResizeOptions) error {
+ return d.apiClient.ContainerResize(ctx, container, options)
+}
+
+func (d *DryRunClient) ContainerStatPath(ctx context.Context, container, path string) (containerType.PathStat, error) {
+ return d.apiClient.ContainerStatPath(ctx, container, path)
+}
+
+func (d *DryRunClient) ContainerStats(ctx context.Context, container string, stream bool) (containerType.StatsResponseReader, error) {
+ return d.apiClient.ContainerStats(ctx, container, stream)
+}
+
+func (d *DryRunClient) ContainerStatsOneShot(ctx context.Context, container string) (containerType.StatsResponseReader, error) {
+ return d.apiClient.ContainerStatsOneShot(ctx, container)
+}
+
+func (d *DryRunClient) ContainerTop(ctx context.Context, container string, arguments []string) (containerType.TopResponse, error) {
+ return d.apiClient.ContainerTop(ctx, container, arguments)
+}
+
+func (d *DryRunClient) ContainerUpdate(ctx context.Context, container string, updateConfig containerType.UpdateConfig) (containerType.UpdateResponse, error) {
+ return d.apiClient.ContainerUpdate(ctx, container, updateConfig)
+}
+
+func (d *DryRunClient) ContainerWait(ctx context.Context, container string, condition containerType.WaitCondition) (<-chan containerType.WaitResponse, <-chan error) {
+ return d.apiClient.ContainerWait(ctx, container, condition)
+}
+
+func (d *DryRunClient) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (containerType.PruneReport, error) {
+ return d.apiClient.ContainersPrune(ctx, pruneFilters)
+}
+
+func (d *DryRunClient) DistributionInspect(ctx context.Context, imageName, encodedRegistryAuth string) (registry.DistributionInspect, error) {
+ return d.apiClient.DistributionInspect(ctx, imageName, encodedRegistryAuth)
+}
+
+func (d *DryRunClient) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) {
+ return d.apiClient.BuildCachePrune(ctx, opts)
+}
+
+func (d *DryRunClient) BuildCancel(ctx context.Context, id string) error {
+ return d.apiClient.BuildCancel(ctx, id)
+}
+
+func (d *DryRunClient) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) {
+ return d.apiClient.ImageCreate(ctx, parentReference, options)
+}
+
+func (d *DryRunClient) ImageHistory(ctx context.Context, imageName string, options ...client.ImageHistoryOption) ([]image.HistoryResponseItem, error) {
+ return d.apiClient.ImageHistory(ctx, imageName, options...)
+}
+
+func (d *DryRunClient) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) {
+ return d.apiClient.ImageImport(ctx, source, ref, options)
+}
+
+func (d *DryRunClient) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
+ return d.apiClient.ImageList(ctx, options)
+}
+
+func (d *DryRunClient) ImageLoad(ctx context.Context, input io.Reader, options ...client.ImageLoadOption) (image.LoadResponse, error) {
+ return d.apiClient.ImageLoad(ctx, input, options...)
+}
+
+func (d *DryRunClient) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) {
+ return d.apiClient.ImageSearch(ctx, term, options)
+}
+
+func (d *DryRunClient) ImageSave(ctx context.Context, images []string, options ...client.ImageSaveOption) (io.ReadCloser, error) {
+ return d.apiClient.ImageSave(ctx, images, options...)
+}
+
+func (d *DryRunClient) ImageTag(ctx context.Context, imageName, ref string) error {
+ return d.apiClient.ImageTag(ctx, imageName, ref)
+}
+
+func (d *DryRunClient) ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) {
+ return d.apiClient.ImagesPrune(ctx, pruneFilter)
+}
+
+func (d *DryRunClient) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ return d.apiClient.NodeInspectWithRaw(ctx, nodeID)
+}
+
+func (d *DryRunClient) NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) {
+ return d.apiClient.NodeList(ctx, options)
+}
+
+func (d *DryRunClient) NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error {
+ return d.apiClient.NodeRemove(ctx, nodeID, options)
+}
+
+func (d *DryRunClient) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ return d.apiClient.NodeUpdate(ctx, nodeID, version, node)
+}
+
+func (d *DryRunClient) NetworkInspect(ctx context.Context, networkName string, options network.InspectOptions) (network.Inspect, error) {
+ return d.apiClient.NetworkInspect(ctx, networkName, options)
+}
+
+func (d *DryRunClient) NetworkInspectWithRaw(ctx context.Context, networkName string, options network.InspectOptions) (network.Inspect, []byte, error) {
+ return d.apiClient.NetworkInspectWithRaw(ctx, networkName, options)
+}
+
+func (d *DryRunClient) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Inspect, error) {
+ return d.apiClient.NetworkList(ctx, options)
+}
+
+func (d *DryRunClient) NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error) {
+ return d.apiClient.NetworksPrune(ctx, pruneFilter)
+}
+
+func (d *DryRunClient) PluginList(ctx context.Context, filter filters.Args) (moby.PluginsListResponse, error) {
+ return d.apiClient.PluginList(ctx, filter)
+}
+
+func (d *DryRunClient) PluginRemove(ctx context.Context, name string, options moby.PluginRemoveOptions) error {
+ return d.apiClient.PluginRemove(ctx, name, options)
+}
+
+func (d *DryRunClient) PluginEnable(ctx context.Context, name string, options moby.PluginEnableOptions) error {
+ return d.apiClient.PluginEnable(ctx, name, options)
+}
+
+func (d *DryRunClient) PluginDisable(ctx context.Context, name string, options moby.PluginDisableOptions) error {
+ return d.apiClient.PluginDisable(ctx, name, options)
+}
+
+func (d *DryRunClient) PluginInstall(ctx context.Context, name string, options moby.PluginInstallOptions) (io.ReadCloser, error) {
+ return d.apiClient.PluginInstall(ctx, name, options)
+}
+
+func (d *DryRunClient) PluginUpgrade(ctx context.Context, name string, options moby.PluginInstallOptions) (io.ReadCloser, error) {
+ return d.apiClient.PluginUpgrade(ctx, name, options)
+}
+
+func (d *DryRunClient) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
+ return d.apiClient.PluginPush(ctx, name, registryAuth)
+}
+
+func (d *DryRunClient) PluginSet(ctx context.Context, name string, args []string) error {
+ return d.apiClient.PluginSet(ctx, name, args)
+}
+
+func (d *DryRunClient) PluginInspectWithRaw(ctx context.Context, name string) (*moby.Plugin, []byte, error) {
+ return d.apiClient.PluginInspectWithRaw(ctx, name)
+}
+
+func (d *DryRunClient) PluginCreate(ctx context.Context, createContext io.Reader, options moby.PluginCreateOptions) error {
+ return d.apiClient.PluginCreate(ctx, createContext, options)
+}
+
+func (d *DryRunClient) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
+ return d.apiClient.ServiceCreate(ctx, service, options)
+}
+
+func (d *DryRunClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error) {
+ return d.apiClient.ServiceInspectWithRaw(ctx, serviceID, options)
+}
+
+func (d *DryRunClient) ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) {
+ return d.apiClient.ServiceList(ctx, options)
+}
+
+func (d *DryRunClient) ServiceRemove(ctx context.Context, serviceID string) error {
+ return d.apiClient.ServiceRemove(ctx, serviceID)
+}
+
+func (d *DryRunClient) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
+ return d.apiClient.ServiceUpdate(ctx, serviceID, version, service, options)
+}
+
+func (d *DryRunClient) ServiceLogs(ctx context.Context, serviceID string, options containerType.LogsOptions) (io.ReadCloser, error) {
+ return d.apiClient.ServiceLogs(ctx, serviceID, options)
+}
+
+func (d *DryRunClient) TaskLogs(ctx context.Context, taskID string, options containerType.LogsOptions) (io.ReadCloser, error) {
+ return d.apiClient.TaskLogs(ctx, taskID, options)
+}
+
+func (d *DryRunClient) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ return d.apiClient.TaskInspectWithRaw(ctx, taskID)
+}
+
+func (d *DryRunClient) TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) {
+ return d.apiClient.TaskList(ctx, options)
+}
+
+func (d *DryRunClient) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ return d.apiClient.SwarmInit(ctx, req)
+}
+
+func (d *DryRunClient) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ return d.apiClient.SwarmJoin(ctx, req)
+}
+
+func (d *DryRunClient) SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) {
+ return d.apiClient.SwarmGetUnlockKey(ctx)
+}
+
+func (d *DryRunClient) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
+ return d.apiClient.SwarmUnlock(ctx, req)
+}
+
+func (d *DryRunClient) SwarmLeave(ctx context.Context, force bool) error {
+ return d.apiClient.SwarmLeave(ctx, force)
+}
+
+func (d *DryRunClient) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ return d.apiClient.SwarmInspect(ctx)
+}
+
+func (d *DryRunClient) SwarmUpdate(ctx context.Context, version swarm.Version, swarmSpec swarm.Spec, flags swarm.UpdateFlags) error {
+ return d.apiClient.SwarmUpdate(ctx, version, swarmSpec, flags)
+}
+
+func (d *DryRunClient) SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) {
+ return d.apiClient.SecretList(ctx, options)
+}
+
+func (d *DryRunClient) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) {
+ return d.apiClient.SecretCreate(ctx, secret)
+}
+
+func (d *DryRunClient) SecretRemove(ctx context.Context, id string) error {
+ return d.apiClient.SecretRemove(ctx, id)
+}
+
+func (d *DryRunClient) SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) {
+ return d.apiClient.SecretInspectWithRaw(ctx, name)
+}
+
+func (d *DryRunClient) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+ return d.apiClient.SecretUpdate(ctx, id, version, secret)
+}
+
+func (d *DryRunClient) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) {
+ return d.apiClient.Events(ctx, options)
+}
+
+func (d *DryRunClient) Info(ctx context.Context) (system.Info, error) {
+ return d.apiClient.Info(ctx)
+}
+
+func (d *DryRunClient) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) {
+ return d.apiClient.RegistryLogin(ctx, auth)
+}
+
+func (d *DryRunClient) DiskUsage(ctx context.Context, options moby.DiskUsageOptions) (moby.DiskUsage, error) {
+ return d.apiClient.DiskUsage(ctx, options)
+}
+
+func (d *DryRunClient) Ping(ctx context.Context) (moby.Ping, error) {
+ return d.apiClient.Ping(ctx)
+}
+
+func (d *DryRunClient) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) {
+ return d.apiClient.VolumeInspect(ctx, volumeID)
+}
+
+func (d *DryRunClient) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) {
+ return d.apiClient.VolumeInspectWithRaw(ctx, volumeID)
+}
+
+func (d *DryRunClient) VolumeList(ctx context.Context, opts volume.ListOptions) (volume.ListResponse, error) {
+ return d.apiClient.VolumeList(ctx, opts)
+}
+
+func (d *DryRunClient) VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error) {
+ return d.apiClient.VolumesPrune(ctx, pruneFilter)
+}
+
+func (d *DryRunClient) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error {
+ return d.apiClient.VolumeUpdate(ctx, volumeID, version, options)
+}
+
+func (d *DryRunClient) ClientVersion() string {
+ return d.apiClient.ClientVersion()
+}
+
+func (d *DryRunClient) DaemonHost() string {
+ return d.apiClient.DaemonHost()
+}
+
+func (d *DryRunClient) HTTPClient() *http.Client {
+ return d.apiClient.HTTPClient()
+}
+
+func (d *DryRunClient) ServerVersion(ctx context.Context) (moby.Version, error) {
+ return d.apiClient.ServerVersion(ctx)
+}
+
+func (d *DryRunClient) NegotiateAPIVersion(ctx context.Context) {
+ d.apiClient.NegotiateAPIVersion(ctx)
+}
+
+func (d *DryRunClient) NegotiateAPIVersionPing(ping moby.Ping) {
+ d.apiClient.NegotiateAPIVersionPing(ping)
+}
+
+func (d *DryRunClient) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
+ return d.apiClient.DialHijack(ctx, url, proto, meta)
+}
+
+func (d *DryRunClient) Dialer() func(context.Context) (net.Conn, error) {
+ return d.apiClient.Dialer()
+}
+
+func (d *DryRunClient) Close() error {
+ return d.apiClient.Close()
+}
+
+func (d *DryRunClient) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error {
+ return d.apiClient.CheckpointCreate(ctx, container, options)
+}
+
+func (d *DryRunClient) CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error {
+ return d.apiClient.CheckpointDelete(ctx, container, options)
+}
+
+func (d *DryRunClient) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) {
+ return d.apiClient.CheckpointList(ctx, container, options)
+}
diff --git a/pkg/e2e/assert.go b/pkg/e2e/assert.go
new file mode 100644
index 00000000000..395e3bb9e19
--- /dev/null
+++ b/pkg/e2e/assert.go
@@ -0,0 +1,45 @@
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "encoding/json"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// RequireServiceState ensures that the container is in the expected state
+// (running or exited).
+func RequireServiceState(t testing.TB, cli *CLI, service string, state string) {
+ t.Helper()
+ psRes := cli.RunDockerComposeCmd(t, "ps", "--all", "--format=json", service)
+ var svc map[string]interface{}
+ require.NoError(t, json.Unmarshal([]byte(psRes.Stdout()), &svc),
+ "Invalid `compose ps` JSON: command output: %s",
+ psRes.Combined())
+
+ require.Equal(t, service, svc["Service"],
+ "Found ps output for unexpected service")
+ require.Equalf(t,
+ strings.ToLower(state),
+ strings.ToLower(svc["State"].(string)),
+ "Service %q (%s) not in expected state",
+ service, svc["Name"],
+ )
+}
diff --git a/pkg/e2e/bridge_test.go b/pkg/e2e/bridge_test.go
new file mode 100644
index 00000000000..c4c99b8d292
--- /dev/null
+++ b/pkg/e2e/bridge_test.go
@@ -0,0 +1,61 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestConvertAndTransformList(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ const projectName = "bridge"
+ const bridgeImageVersion = "v0.0.3"
+ tmpDir := t.TempDir()
+
+ t.Run("kubernetes manifests", func(t *testing.T) {
+ kubedir := filepath.Join(tmpDir, "kubernetes")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/bridge/compose.yaml", "--project-name", projectName, "bridge", "convert",
+ "--output", kubedir, "--transformation", fmt.Sprintf("docker/compose-bridge-kubernetes:%s", bridgeImageVersion))
+ assert.NilError(t, res.Error)
+ assert.Equal(t, res.ExitCode, 0)
+ res = c.RunCmd(t, "diff", "-r", kubedir, "./fixtures/bridge/expected-kubernetes")
+ assert.NilError(t, res.Error, res.Combined())
+ })
+
+ t.Run("helm charts", func(t *testing.T) {
+ helmDir := filepath.Join(tmpDir, "helm")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/bridge/compose.yaml", "--project-name", projectName, "bridge", "convert",
+ "--output", helmDir, "--transformation", fmt.Sprintf("docker/compose-bridge-helm:%s", bridgeImageVersion))
+ assert.NilError(t, res.Error)
+ assert.Equal(t, res.ExitCode, 0)
+ res = c.RunCmd(t, "diff", "-r", helmDir, "./fixtures/bridge/expected-helm")
+ assert.NilError(t, res.Error, res.Combined())
+ })
+
+ t.Run("list transformers images", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "bridge", "transformations",
+ "ls")
+ assert.Assert(t, strings.Contains(res.Stdout(), "docker/compose-bridge-helm"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Stdout(), "docker/compose-bridge-kubernetes"), res.Combined())
+ })
+}
diff --git a/pkg/e2e/build_test.go b/pkg/e2e/build_test.go
new file mode 100644
index 00000000000..1fc3ac87667
--- /dev/null
+++ b/pkg/e2e/build_test.go
@@ -0,0 +1,641 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+ "gotest.tools/v3/poll"
+)
+
+func TestLocalComposeBuild(t *testing.T) {
+ for _, env := range []string{"DOCKER_BUILDKIT=0", "DOCKER_BUILDKIT=1"} {
+ c := NewCLI(t, WithEnv(strings.Split(env, ",")...))
+
+ t.Run(env+" build named and unnamed images", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
+ c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build")
+
+ res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
+ c.RunDockerCmd(t, "image", "inspect", "build-test-nginx")
+ c.RunDockerCmd(t, "image", "inspect", "custom-nginx")
+ })
+
+ t.Run(env+" build with build-arg", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
+ c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
+
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build", "--build-arg", "FOO=BAR")
+
+ res := c.RunDockerCmd(t, "image", "inspect", "build-test-nginx")
+ res.Assert(t, icmd.Expected{Out: `"FOO": "BAR"`})
+ })
+
+ t.Run(env+" build with build-arg set by env", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
+ c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
+
+ icmd.RunCmd(c.NewDockerComposeCmd(t,
+ "--project-directory",
+ "fixtures/build-test",
+ "build",
+ "--build-arg",
+ "FOO"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "FOO=BAR")
+ }).Assert(t, icmd.Success)
+
+ res := c.RunDockerCmd(t, "image", "inspect", "build-test-nginx")
+ res.Assert(t, icmd.Expected{Out: `"FOO": "BAR"`})
+ })
+
+ t.Run(env+" build with multiple build-args ", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "-f", "multi-args-multiargs")
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/multi-args", "build")
+
+ icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "DOCKER_BUILDKIT=0")
+ })
+
+ res := c.RunDockerCmd(t, "image", "inspect", "multi-args-multiargs")
+ res.Assert(t, icmd.Expected{Out: `"RESULT": "SUCCESS"`})
+ })
+
+ t.Run(env+" build as part of up", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
+ c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "up", "-d")
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "down")
+ })
+
+ res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
+ res.Assert(t, icmd.Expected{Out: "COPY static2 /usr/share/nginx/html"})
+
+ output := HTTPGetWithRetry(t, "http://localhost:8070", http.StatusOK, 2*time.Second, 20*time.Second)
+ assert.Assert(t, strings.Contains(output, "Hello from Nginx container"))
+
+ c.RunDockerCmd(t, "image", "inspect", "build-test-nginx")
+ c.RunDockerCmd(t, "image", "inspect", "custom-nginx")
+ })
+
+ t.Run(env+" no rebuild when up again", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "up", "-d")
+
+ assert.Assert(t, !strings.Contains(res.Stdout(), "COPY static"))
+ })
+
+ t.Run(env+" rebuild when up --build", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "up", "-d", "--build")
+
+ res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
+ res.Assert(t, icmd.Expected{Out: "COPY static2 /usr/share/nginx/html"})
+ })
+
+ t.Run(env+" build --push ignored for unnamed images", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build", "--push", "nginx")
+ assert.Assert(t, !strings.Contains(res.Stdout(), "failed to push"), res.Stdout())
+ })
+
+ t.Run(env+" build --quiet", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build", "--quiet")
+ res.Assert(t, icmd.Expected{Out: ""})
+ })
+
+ t.Run(env+" cleanup build project", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "down")
+ c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
+ c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
+ })
+ }
+}
+
+func TestBuildSSH(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Running on Windows. Skipping...")
+ }
+ c := NewParallelCLI(t)
+
+ t.Run("build failed with ssh default value", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test", "build", "--ssh", "")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "invalid empty ssh agent socket: make sure SSH_AUTH_SOCK is set",
+ })
+ })
+
+ t.Run("build succeed with ssh from Compose file", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "build-test-ssh")
+
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/ssh", "build")
+ c.RunDockerCmd(t, "image", "inspect", "build-test-ssh")
+ })
+
+ t.Run("build succeed with ssh from CLI", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "build-test-ssh")
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/ssh/compose-without-ssh.yaml", "--project-directory",
+ "fixtures/build-test/ssh", "build", "--no-cache", "--ssh", "fake-ssh=./fixtures/build-test/ssh/fake_rsa")
+ c.RunDockerCmd(t, "image", "inspect", "build-test-ssh")
+ })
+
+ /*
+ FIXME disabled waiting for https://github.com/moby/buildkit/issues/5558
+ t.Run("build failed with wrong ssh key id from CLI", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "build-test-ssh")
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/build-test/ssh/compose-without-ssh.yaml",
+ "--project-directory", "fixtures/build-test/ssh", "build", "--no-cache", "--ssh",
+ "wrong-ssh=./fixtures/build-test/ssh/fake_rsa")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "unset ssh forward key fake-ssh",
+ })
+ })
+ */
+
+ t.Run("build succeed as part of up with ssh from Compose file", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "build-test-ssh")
+
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/ssh", "up", "-d", "--build")
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/ssh", "down")
+ })
+ c.RunDockerCmd(t, "image", "inspect", "build-test-ssh")
+ })
+}
+
+func TestBuildSecrets(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping test on windows")
+ }
+ c := NewParallelCLI(t)
+
+ t.Run("build with secrets", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "build-test-secret")
+
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/secrets", "build")
+
+ res := icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "SOME_SECRET=bar")
+ })
+
+ res.Assert(t, icmd.Success)
+ })
+}
+
+func TestBuildTags(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("build with tags", func(t *testing.T) {
+ // ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "build-test-tags")
+
+ c.RunDockerComposeCmd(t, "--project-directory", "./fixtures/build-test/tags", "build", "--no-cache")
+
+ res := c.RunDockerCmd(t, "image", "inspect", "build-test-tags")
+ expectedOutput := `"RepoTags": [
+ "docker/build-test-tags:1.0.0",
+ "build-test-tags:latest",
+ "other-image-name:v1.0.0"
+ ],
+`
+ res.Assert(t, icmd.Expected{Out: expectedOutput})
+ })
+}
+
+func TestBuildImageDependencies(t *testing.T) {
+ doTest := func(t *testing.T, cli *CLI, args ...string) {
+ resetState := func() {
+ cli.RunDockerComposeCmd(t, "down", "--rmi=all", "-t=0")
+ res := cli.RunDockerOrExitError(t, "image", "rm", "build-dependencies-service")
+ if res.Error != nil {
+ require.Contains(t, res.Stderr(), `No such image: build-dependencies-service`)
+ }
+ }
+ resetState()
+ t.Cleanup(resetState)
+
+ // the image should NOT exist now
+ res := cli.RunDockerOrExitError(t, "image", "inspect", "build-dependencies-service")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such image: build-dependencies-service",
+ })
+
+ res = cli.RunDockerComposeCmd(t, args...)
+ t.Log(res.Combined())
+
+ res = cli.RunDockerCmd(t,
+ "image", "inspect", "--format={{ index .RepoTags 0 }}",
+ "build-dependencies-service")
+ res.Assert(t, icmd.Expected{Out: "build-dependencies-service:latest"})
+
+ res = cli.RunDockerComposeCmd(t, "down", "-t0", "--rmi=all", "--remove-orphans")
+ t.Log(res.Combined())
+
+ res = cli.RunDockerOrExitError(t, "image", "inspect", "build-dependencies-service")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such image: build-dependencies-service",
+ })
+ }
+
+ t.Run("ClassicBuilder", func(t *testing.T) {
+ cli := NewCLI(t, WithEnv(
+ "DOCKER_BUILDKIT=0",
+ "COMPOSE_FILE=./fixtures/build-dependencies/classic.yaml",
+ ))
+ doTest(t, cli, "build")
+ doTest(t, cli, "build", "--with-dependencies", "service")
+ })
+
+ t.Run("Bake by additional contexts", func(t *testing.T) {
+ cli := NewCLI(t, WithEnv(
+ "DOCKER_BUILDKIT=1", "COMPOSE_BAKE=1",
+ "COMPOSE_FILE=./fixtures/build-dependencies/compose.yaml",
+ ))
+ doTest(t, cli, "--verbose", "build")
+ doTest(t, cli, "--verbose", "build", "service")
+ doTest(t, cli, "--verbose", "up", "--build", "service")
+ })
+}
+
+func TestBuildPlatformsWithCorrectBuildxConfig(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Running on Windows. Skipping...")
+ }
+ c := NewParallelCLI(t)
+
+ // declare builder
+ result := c.RunDockerCmd(t, "buildx", "create", "--name", "build-platform", "--use", "--bootstrap")
+ assert.NilError(t, result.Error)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/platforms", "down")
+ _ = c.RunDockerCmd(t, "buildx", "rm", "-f", "build-platform")
+ })
+
+ t.Run("platform not supported by builder", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms",
+ "-f", "fixtures/build-test/platforms/compose-unsupported-platform.yml", "build")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "no match for platform",
+ })
+ })
+
+ t.Run("multi-arch build ok", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms", "build")
+ assert.NilError(t, res.Error, res.Stderr())
+ res.Assert(t, icmd.Expected{Out: "I am building for linux/arm64"})
+ res.Assert(t, icmd.Expected{Out: "I am building for linux/amd64"})
+ })
+
+ t.Run("multi-arch multi service builds ok", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms",
+ "-f", "fixtures/build-test/platforms/compose-multiple-platform-builds.yaml", "build")
+ assert.NilError(t, res.Error, res.Stderr())
+ res.Assert(t, icmd.Expected{Out: "I'm Service A and I am building for linux/arm64"})
+ res.Assert(t, icmd.Expected{Out: "I'm Service A and I am building for linux/amd64"})
+ res.Assert(t, icmd.Expected{Out: "I'm Service B and I am building for linux/arm64"})
+ res.Assert(t, icmd.Expected{Out: "I'm Service B and I am building for linux/amd64"})
+ res.Assert(t, icmd.Expected{Out: "I'm Service C and I am building for linux/arm64"})
+ res.Assert(t, icmd.Expected{Out: "I'm Service C and I am building for linux/amd64"})
+ })
+
+ t.Run("multi-arch up --build", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms", "up", "--build", "--menu=false")
+ assert.NilError(t, res.Error, res.Stderr())
+ res.Assert(t, icmd.Expected{Out: "platforms-1 exited with code 0"})
+ })
+
+ t.Run("use DOCKER_DEFAULT_PLATFORM value when up --build", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/platforms", "up", "--build", "--menu=false")
+ res := icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "DOCKER_DEFAULT_PLATFORM=linux/amd64")
+ })
+ assert.NilError(t, res.Error, res.Stderr())
+ res.Assert(t, icmd.Expected{Out: "I am building for linux/amd64"})
+ assert.Assert(t, !strings.Contains(res.Stdout(), "I am building for linux/arm64"))
+ })
+
+ t.Run("use service platform value when no build platforms defined ", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms",
+ "-f", "fixtures/build-test/platforms/compose-service-platform-and-no-build-platforms.yaml", "build")
+ assert.NilError(t, res.Error, res.Stderr())
+ res.Assert(t, icmd.Expected{Out: "I am building for linux/386"})
+ })
+}
+
+func TestBuildPrivileged(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ // declare builder
+ result := c.RunDockerCmd(t, "buildx", "create", "--name", "build-privileged", "--use", "--bootstrap", "--buildkitd-flags",
+ `'--allow-insecure-entitlement=security.insecure'`)
+ assert.NilError(t, result.Error)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/privileged", "down")
+ _ = c.RunDockerCmd(t, "buildx", "rm", "-f", "build-privileged")
+ })
+
+ t.Run("use build privileged mode to run insecure build command", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/privileged", "build")
+ capEffRe := regexp.MustCompile("CapEff:\t([0-9a-f]+)")
+ matches := capEffRe.FindStringSubmatch(res.Stdout())
+ assert.Equal(t, 2, len(matches), "Did not match CapEff in output, matches: %v", matches)
+
+ capEff, err := strconv.ParseUint(matches[1], 16, 64)
+ assert.NilError(t, err, "Parsing CapEff: %s", matches[1])
+
+ // NOTE: can't use constant from x/sys/unix or tests won't compile on macOS/Windows
+ // #define CAP_SYS_ADMIN 21
+ // https://github.com/torvalds/linux/blob/v6.1/include/uapi/linux/capability.h#L278
+ const capSysAdmin = 0x15
+ if capEff&capSysAdmin != capSysAdmin {
+ t.Fatalf("CapEff %s is missing CAP_SYS_ADMIN", matches[1])
+ }
+ })
+}
+
+func TestBuildPlatformsStandardErrors(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("no platform support with Classic Builder", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/platforms", "build")
+
+ res := icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "DOCKER_BUILDKIT=0")
+ })
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "the classic builder doesn't support multi-arch build, set DOCKER_BUILDKIT=1 to use BuildKit",
+ })
+ })
+
+ t.Run("builder does not support multi-arch", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms", "build")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "Multi-platform build is not supported for the docker driver.",
+ })
+ })
+
+ t.Run("service platform not defined in platforms build section", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/platforms",
+ "-f", "fixtures/build-test/platforms/compose-service-platform-not-in-build-platforms.yaml", "build")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: `service.build.platforms MUST include service.platform "linux/riscv64"`,
+ })
+ })
+
+ t.Run("DOCKER_DEFAULT_PLATFORM value not defined in platforms build section", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/platforms", "build")
+ res := icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "DOCKER_DEFAULT_PLATFORM=windows/amd64")
+ })
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: `service "platforms" build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: windows/amd64`,
+ })
+ })
+
+ t.Run("no privileged support with Classic Builder", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/build-test/privileged", "build")
+
+ res := icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "DOCKER_BUILDKIT=0")
+ })
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "the classic builder doesn't support privileged mode, set DOCKER_BUILDKIT=1 to use BuildKit",
+ })
+ })
+}
+
+func TestBuildBuilder(t *testing.T) {
+ c := NewParallelCLI(t)
+ builderName := "build-with-builder"
+ // declare builder
+ result := c.RunDockerCmd(t, "buildx", "create", "--name", builderName, "--use", "--bootstrap")
+ assert.NilError(t, result.Error)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/", "down")
+ _ = c.RunDockerCmd(t, "buildx", "rm", "-f", builderName)
+ })
+
+ t.Run("use specific builder to run build command", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test", "build", "--builder", builderName)
+ assert.NilError(t, res.Error, res.Stderr())
+ })
+
+ t.Run("error when using specific builder to run build command", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test", "build", "--builder", "unknown-builder")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: fmt.Sprintf(`no builder %q found`, "unknown-builder"),
+ })
+ })
+}
+
+func TestBuildEntitlements(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ // declare builder
+ result := c.RunDockerCmd(t, "buildx", "create", "--name", "build-insecure", "--use", "--bootstrap", "--buildkitd-flags",
+ `'--allow-insecure-entitlement=security.insecure'`)
+ assert.NilError(t, result.Error)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/entitlements", "down")
+ _ = c.RunDockerCmd(t, "buildx", "rm", "-f", "build-insecure")
+ })
+
+ t.Run("use build privileged mode to run insecure build command", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/entitlements", "build")
+ capEffRe := regexp.MustCompile("CapEff:\t([0-9a-f]+)")
+ matches := capEffRe.FindStringSubmatch(res.Stdout())
+ assert.Equal(t, 2, len(matches), "Did not match CapEff in output, matches: %v", matches)
+
+ capEff, err := strconv.ParseUint(matches[1], 16, 64)
+ assert.NilError(t, err, "Parsing CapEff: %s", matches[1])
+
+ // NOTE: can't use constant from x/sys/unix or tests won't compile on macOS/Windows
+ // #define CAP_SYS_ADMIN 21
+ // https://github.com/torvalds/linux/blob/v6.1/include/uapi/linux/capability.h#L278
+ const capSysAdmin = 0x15
+ if capEff&capSysAdmin != capSysAdmin {
+ t.Fatalf("CapEff %s is missing CAP_SYS_ADMIN", matches[1])
+ }
+ })
+}
+
+func TestBuildDependsOn(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-dependencies/compose-depends_on.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-dependencies/compose-depends_on.yaml", "--progress=plain", "up", "test2")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "test1 Built"))
+}
+
+func TestBuildSubset(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/subset/compose.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/subset/compose.yaml", "build", "main")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "main Built"))
+}
+
+func TestBuildDependentImage(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/dependencies/compose.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/dependencies/compose.yaml", "build", "firstbuild")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "firstbuild Built"))
+
+ res = c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/dependencies/compose.yaml", "build", "secondbuild")
+ out = res.Combined()
+ assert.Check(t, strings.Contains(out, "secondbuild Built"))
+}
+
+func TestBuildSubDependencies(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/sub-dependencies/compose.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/sub-dependencies/compose.yaml", "build", "main")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "main Built"))
+
+ res = c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/sub-dependencies/compose.yaml", "up", "--build", "main")
+ out = res.Combined()
+ assert.Check(t, strings.Contains(out, "main Built"))
+}
+
+func TestBuildLongOutputLine(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/long-output-line/compose.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/long-output-line/compose.yaml", "build", "long-line")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "long-line Built"))
+
+ res = c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/long-output-line/compose.yaml", "up", "--build", "long-line")
+ out = res.Combined()
+ assert.Check(t, strings.Contains(out, "long-line Built"))
+}
+
+func TestBuildDependentImageWithProfile(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/profiles/compose.yaml", "down", "--rmi=local")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/build-test/profiles/compose.yaml", "build", "secret-build-test")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "secret-build-test Built"))
+}
+
+func TestBuildTLS(t *testing.T) {
+ t.Helper()
+
+ c := NewParallelCLI(t)
+ const dindBuilder = "e2e-dind-builder"
+ tmp := t.TempDir()
+
+ t.Cleanup(func() {
+ c.RunDockerCmd(t, "rm", "-f", dindBuilder)
+ c.RunDockerCmd(t, "context", "rm", dindBuilder)
+ })
+
+ c.RunDockerCmd(t, "run", "--name", dindBuilder, "--privileged", "-p", "2376:2376", "-d", "docker:dind")
+
+ poll.WaitOn(t, func(_ poll.LogT) poll.Result {
+ res := c.RunDockerCmd(t, "logs", dindBuilder)
+ if strings.Contains(res.Combined(), "API listen on [::]:2376") {
+ return poll.Success()
+ }
+ return poll.Continue("waiting for Docker daemon to be running")
+ }, poll.WithTimeout(10*time.Second))
+
+ time.Sleep(1 * time.Second) // wait for dind setup
+ c.RunDockerCmd(t, "cp", dindBuilder+":/certs/client", tmp)
+
+ c.RunDockerCmd(t, "context", "create", dindBuilder, "--docker",
+ fmt.Sprintf("host=tcp://localhost:2376,ca=%s/client/ca.pem,cert=%s/client/cert.pem,key=%s/client/key.pem,skip-tls-verify=1", tmp, tmp, tmp))
+
+ cmd := c.NewDockerComposeCmd(t, "-f", "fixtures/build-test/minimal/compose.yaml", "build")
+ cmd.Env = append(cmd.Env, "DOCKER_CONTEXT="+dindBuilder)
+ cmd.Stdout = os.Stdout
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{Err: "Built"})
+}
+
+func TestBuildEscaped(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "./fixtures/build-test/escaped", "build", "--no-cache", "foo")
+ res.Assert(t, icmd.Expected{Out: "foo is ${bar}"})
+
+ res = c.RunDockerComposeCmd(t, "--project-directory", "./fixtures/build-test/escaped", "build", "--no-cache", "echo")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "--project-directory", "./fixtures/build-test/escaped", "build", "--no-cache", "arg")
+ res.Assert(t, icmd.Success)
+}
diff --git a/pkg/e2e/cancel_test.go b/pkg/e2e/cancel_test.go
index 4696024d3d9..51c20062f0b 100644
--- a/pkg/e2e/cancel_test.go
+++ b/pkg/e2e/cancel_test.go
@@ -20,7 +20,7 @@
package e2e
import (
- "bytes"
+ "context"
"fmt"
"os/exec"
"strings"
@@ -28,48 +28,72 @@ import (
"testing"
"time"
+ "github.com/docker/compose/v5/pkg/utils"
"gotest.tools/v3/assert"
"gotest.tools/v3/icmd"
)
func TestComposeCancel(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
t.Run("metrics on cancel Compose build", func(t *testing.T) {
- c.RunDockerComposeCmd("ls")
- buildProjectPath := "fixtures/build-infinite/compose.yaml"
+ const buildProjectPath = "fixtures/build-infinite/compose.yaml"
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
// require a separate groupID from the process running tests, in order to simulate ctrl+C from a terminal.
// sending kill signal
- cmd, stdout, stderr, err := StartWithNewGroupID(c.NewDockerCmd("compose", "-f", buildProjectPath, "build", "--progress", "plain"))
+ var stdout, stderr utils.SafeBuffer
+ cmd, err := StartWithNewGroupID(
+ ctx,
+ c.NewDockerComposeCmd(t, "-f", buildProjectPath, "build", "--progress", "plain"),
+ &stdout,
+ &stderr,
+ )
assert.NilError(t, err)
+ processDone := make(chan error, 1)
+ go func() {
+ defer close(processDone)
+ processDone <- cmd.Wait()
+ }()
- c.WaitForCondition(func() (bool, string) {
+ c.WaitForCondition(t, func() (bool, string) {
out := stdout.String()
errors := stderr.String()
- return strings.Contains(out, "RUN sleep infinity"), fmt.Sprintf("'RUN sleep infinity' not found in : \n%s\nStderr: \n%s\n", out, errors)
+ return strings.Contains(out,
+ "RUN sleep infinity"), fmt.Sprintf("'RUN sleep infinity' not found in : \n%s\nStderr: \n%s\n", out,
+ errors)
}, 30*time.Second, 1*time.Second)
- err = syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) // simulate Ctrl-C : send signal to processGroup, children will have same groupId by default
-
+ // simulate Ctrl-C : send signal to processGroup, children will have same groupId by default
+ err = syscall.Kill(-cmd.Process.Pid, syscall.SIGINT)
assert.NilError(t, err)
- c.WaitForCondition(func() (bool, string) {
- out := stdout.String()
- errors := stderr.String()
- return strings.Contains(out, "CANCELED"), fmt.Sprintf("'CANCELED' not found in : \n%s\nStderr: \n%s\n", out, errors)
- }, 10*time.Second, 1*time.Second)
+
+ select {
+ case <-ctx.Done():
+ t.Fatal("test context canceled")
+ case err := <-processDone:
+ // TODO(milas): Compose should really not return exit code 130 here,
+ // this is an old hack for the compose-cli wrapper
+ assert.Error(t, err, "exit status 130",
+ "STDOUT:\n%s\nSTDERR:\n%s\n", stdout.String(), stderr.String())
+ case <-time.After(10 * time.Second):
+ t.Fatal("timeout waiting for Compose exit")
+ }
})
}
-func StartWithNewGroupID(command icmd.Cmd) (*exec.Cmd, *bytes.Buffer, *bytes.Buffer, error) {
- cmd := exec.Command(command.Command[0], command.Command[1:]...)
+func StartWithNewGroupID(ctx context.Context, command icmd.Cmd, stdout *utils.SafeBuffer, stderr *utils.SafeBuffer) (*exec.Cmd, error) {
+ cmd := exec.CommandContext(ctx, command.Command[0], command.Command[1:]...)
cmd.Env = command.Env
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
-
- var stdout bytes.Buffer
- var stderr bytes.Buffer
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
+ if stdout != nil {
+ cmd.Stdout = stdout
+ }
+ if stderr != nil {
+ cmd.Stderr = stderr
+ }
err := cmd.Start()
- return cmd, &stdout, &stderr, err
+ return cmd, err
}
diff --git a/pkg/e2e/cascade_stop_test.go b/pkg/e2e/cascade_stop_test.go
deleted file mode 100644
index e949fb4c2b9..00000000000
--- a/pkg/e2e/cascade_stop_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package e2e
-
-import (
- "testing"
-
- "gotest.tools/v3/icmd"
-)
-
-func TestCascadeStop(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
-
- const projectName = "e2e-cascade-stop"
-
- t.Run("abort-on-container-exit", func(t *testing.T) {
- res := c.RunDockerOrExitError("compose", "-f", "./fixtures/cascade-stop-test/compose.yaml", "--project-name", projectName, "up", "--abort-on-container-exit")
- res.Assert(t, icmd.Expected{ExitCode: 1, Out: `should_fail-1 exited with code 1`})
- res.Assert(t, icmd.Expected{ExitCode: 1, Out: `Aborting on container exit...`})
- })
-
- t.Run("exit-code-from", func(t *testing.T) {
- res := c.RunDockerOrExitError("compose", "-f", "./fixtures/cascade-stop-test/compose.yaml", "--project-name", projectName, "up", "--exit-code-from=sleep")
- res.Assert(t, icmd.Expected{ExitCode: 137, Out: `should_fail-1 exited with code 1`})
- res.Assert(t, icmd.Expected{ExitCode: 137, Out: `Aborting on container exit...`})
- })
-
- t.Run("exit-code-from unknown", func(t *testing.T) {
- res := c.RunDockerOrExitError("compose", "-f", "./fixtures/cascade-stop-test/compose.yaml", "--project-name", projectName, "up", "--exit-code-from=unknown")
- res.Assert(t, icmd.Expected{ExitCode: 1, Err: `no such service: unknown`})
- })
-
- t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
- })
-}
diff --git a/pkg/e2e/cascade_test.go b/pkg/e2e/cascade_test.go
new file mode 100644
index 00000000000..e5b2ac141e0
--- /dev/null
+++ b/pkg/e2e/cascade_test.go
@@ -0,0 +1,56 @@
+//go:build !windows
+// +build !windows
+
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestCascadeStop(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-cascade-stop"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cascade/compose.yaml", "--project-name", projectName,
+ "up", "--abort-on-container-exit")
+ assert.Assert(t, strings.Contains(res.Combined(), "exit-1 exited with code 0"), res.Combined())
+ // no --exit-code-from, so this is not an error
+ assert.Equal(t, res.ExitCode, 0)
+}
+
+func TestCascadeFail(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-cascade-fail"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/cascade/compose.yaml", "--project-name", projectName,
+ "up", "--abort-on-container-failure")
+ assert.Assert(t, strings.Contains(res.Combined(), "exit-1 exited with code 0"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "fail-1 exited with code 111"), res.Combined())
+ // failing exit code should be propagated
+ assert.Equal(t, res.ExitCode, 111)
+}
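
As a reading aid, a toy model (not Compose's implementation) of the exit-code rules these two tests assert: --abort-on-container-exit alone stops the stack but exits 0 when no --exit-code-from is given, while --abort-on-container-failure propagates the first failing container's code (111 in the fixture).

package main

import "fmt"

// cascadeExitCode is a toy model of the behaviour asserted above, not the
// real implementation: without --abort-on-container-failure (and without
// --exit-code-from) a clean abort exits 0; with it, the first non-zero
// container exit code is propagated.
func cascadeExitCode(abortOnFailure bool, containerExitCodes []int) int {
	if !abortOnFailure {
		return 0
	}
	for _, code := range containerExitCodes {
		if code != 0 {
			return code
		}
	}
	return 0
}

func main() {
	fmt.Println(cascadeExitCode(false, []int{0, 111})) // 0, as in TestCascadeStop
	fmt.Println(cascadeExitCode(true, []int{0, 111}))  // 111, as in TestCascadeFail
}
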
diff --git a/pkg/e2e/commit_test.go b/pkg/e2e/commit_test.go
new file mode 100644
index 00000000000..0daf130545b
--- /dev/null
+++ b/pkg/e2e/commit_test.go
@@ -0,0 +1,93 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+)
+
+func TestCommit(t *testing.T) {
+ const projectName = "e2e-commit-service"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/commit/compose.yaml", "--project-name", projectName, "up", "-d", "service")
+
+ c.RunDockerComposeCmd(
+ t,
+ "--project-name",
+ projectName,
+ "commit",
+ "-a",
+ "John Hannibal Smith ",
+ "-c",
+ "ENV DEBUG=true",
+ "-m",
+ "sample commit",
+ "service",
+ "service:latest",
+ )
+}
+
+func TestCommitWithReplicas(t *testing.T) {
+ const projectName = "e2e-commit-service-with-replicas"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/commit/compose.yaml", "--project-name", projectName, "up", "-d", "service-with-replicas")
+
+ c.RunDockerComposeCmd(
+ t,
+ "--project-name",
+ projectName,
+ "commit",
+ "-a",
+ "John Hannibal Smith ",
+ "-c",
+ "ENV DEBUG=true",
+ "-m",
+ "sample commit",
+ "--index=1",
+ "service-with-replicas",
+ "service-with-replicas:1",
+ )
+ c.RunDockerComposeCmd(
+ t,
+ "--project-name",
+ projectName,
+ "commit",
+ "-a",
+ "John Hannibal Smith ",
+ "-c",
+ "ENV DEBUG=true",
+ "-m",
+ "sample commit",
+ "--index=2",
+ "service-with-replicas",
+ "service-with-replicas:2",
+ )
+}
diff --git a/pkg/e2e/compose_build_test.go b/pkg/e2e/compose_build_test.go
deleted file mode 100644
index c0fff3e48da..00000000000
--- a/pkg/e2e/compose_build_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package e2e
-
-import (
- "net/http"
- "os"
- "strings"
- "testing"
- "time"
-
- "gotest.tools/v3/assert"
- "gotest.tools/v3/icmd"
-)
-
-func TestLocalComposeBuild(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
-
- t.Run("build named and unnamed images", func(t *testing.T) {
- // ensure local test run does not reuse previously build image
- c.RunDockerOrExitError("rmi", "build-test_nginx")
- c.RunDockerOrExitError("rmi", "custom-nginx")
-
- res := c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "build")
-
- res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
- c.RunDockerCmd("image", "inspect", "build-test_nginx")
- c.RunDockerCmd("image", "inspect", "custom-nginx")
- })
-
- t.Run("build with build-arg", func(t *testing.T) {
- // ensure local test run does not reuse previously build image
- c.RunDockerOrExitError("rmi", "build-test_nginx")
- c.RunDockerOrExitError("rmi", "custom-nginx")
-
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "build", "--build-arg", "FOO=BAR")
-
- res := c.RunDockerCmd("image", "inspect", "build-test_nginx")
- res.Assert(t, icmd.Expected{Out: `"FOO": "BAR"`})
- })
-
- t.Run("build with build-arg set by env", func(t *testing.T) {
- // ensure local test run does not reuse previously build image
- c.RunDockerOrExitError("rmi", "build-test_nginx")
- c.RunDockerOrExitError("rmi", "custom-nginx")
-
- icmd.RunCmd(c.NewDockerCmd("compose", "--project-directory", "fixtures/build-test", "build", "--build-arg", "FOO"),
- func(cmd *icmd.Cmd) {
- cmd.Env = append(cmd.Env, "FOO=BAR")
- })
-
- res := c.RunDockerCmd("image", "inspect", "build-test_nginx")
- res.Assert(t, icmd.Expected{Out: `"FOO": "BAR"`})
- })
-
- t.Run("build with multiple build-args ", func(t *testing.T) {
- // ensure local test run does not reuse previously build image
- c.RunDockerOrExitError("rmi", "-f", "multi-args_multiargs")
- cmd := c.NewDockerCmd("compose", "--project-directory", "fixtures/build-test/multi-args", "build")
-
- icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
- cmd.Env = append(cmd.Env, "DOCKER_BUILDKIT=0")
- })
-
- res := c.RunDockerCmd("image", "inspect", "multi-args_multiargs")
- res.Assert(t, icmd.Expected{Out: `"RESULT": "SUCCESS"`})
- })
-
- t.Run("build failed with ssh default value", func(t *testing.T) {
- //unset SSH_AUTH_SOCK to be sure we don't have a default value for the SSH Agent
- defaultSSHAUTHSOCK := os.Getenv("SSH_AUTH_SOCK")
- os.Unsetenv("SSH_AUTH_SOCK") //nolint:errcheck
- defer os.Setenv("SSH_AUTH_SOCK", defaultSSHAUTHSOCK) //nolint:errcheck
-
- res := c.RunDockerComposeCmdNoCheck("--project-directory", "fixtures/build-test", "build", "--ssh", "")
- res.Assert(t, icmd.Expected{
- ExitCode: 1,
- Err: "invalid empty ssh agent socket: make sure SSH_AUTH_SOCK is set",
- })
-
- })
-
- t.Run("build succeed with ssh from Compose file", func(t *testing.T) {
- c.RunDockerOrExitError("rmi", "build-test-ssh")
-
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test/ssh", "build")
- c.RunDockerCmd("image", "inspect", "build-test-ssh")
- })
-
- t.Run("build succeed with ssh from CLI", func(t *testing.T) {
- c.RunDockerOrExitError("rmi", "build-test-ssh")
-
- c.RunDockerComposeCmd("-f", "fixtures/build-test/ssh/compose-without-ssh.yaml", "--project-directory",
- "fixtures/build-test/ssh", "build", "--no-cache", "--ssh", "fake-ssh=./fixtures/build-test/ssh/fake_rsa")
- c.RunDockerCmd("image", "inspect", "build-test-ssh")
- })
-
- t.Run("build failed with wrong ssh key id from CLI", func(t *testing.T) {
- c.RunDockerOrExitError("rmi", "build-test-ssh")
-
- res := c.RunDockerComposeCmdNoCheck("-f", "fixtures/build-test/ssh/compose-without-ssh.yaml",
- "--project-directory", "fixtures/build-test/ssh", "build", "--no-cache", "--ssh",
- "wrong-ssh=./fixtures/build-test/ssh/fake_rsa")
- res.Assert(t, icmd.Expected{
- ExitCode: 17,
- Err: "failed to solve: rpc error: code = Unknown desc = unset ssh forward key fake-ssh",
- })
- })
-
- t.Run("build succeed as part of up with ssh from Compose file", func(t *testing.T) {
- c.RunDockerOrExitError("rmi", "build-test-ssh")
-
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test/ssh", "up", "-d", "--build")
- t.Cleanup(func() {
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test/ssh", "down")
- })
- c.RunDockerCmd("image", "inspect", "build-test-ssh")
- })
-
- t.Run("build as part of up", func(t *testing.T) {
- c.RunDockerOrExitError("rmi", "build-test_nginx")
- c.RunDockerOrExitError("rmi", "custom-nginx")
-
- res := c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "up", "-d")
- t.Cleanup(func() {
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "down")
- })
-
- res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
- res.Assert(t, icmd.Expected{Out: "COPY static2 /usr/share/nginx/html"})
-
- output := HTTPGetWithRetry(t, "http://localhost:8070/", http.StatusOK, 2*time.Second, 20*time.Second)
- assert.Assert(t, strings.Contains(output, "Hello from Nginx container"))
-
- c.RunDockerCmd("image", "inspect", "build-test_nginx")
- c.RunDockerCmd("image", "inspect", "custom-nginx")
- })
-
- t.Run("no rebuild when up again", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "up", "-d")
-
- assert.Assert(t, !strings.Contains(res.Stdout(), "COPY static"), res.Stdout())
- })
-
- t.Run("rebuild when up --build", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--workdir", "fixtures/build-test", "up", "-d", "--build")
-
- res.Assert(t, icmd.Expected{Out: "COPY static /usr/share/nginx/html"})
- res.Assert(t, icmd.Expected{Out: "COPY static2 /usr/share/nginx/html"})
- })
-
- t.Run("cleanup build project", func(t *testing.T) {
- c.RunDockerComposeCmd("--project-directory", "fixtures/build-test", "down")
- c.RunDockerCmd("rmi", "build-test_nginx")
- c.RunDockerCmd("rmi", "custom-nginx")
- })
-}
diff --git a/pkg/e2e/compose_environment_test.go b/pkg/e2e/compose_environment_test.go
new file mode 100644
index 00000000000..62d6c9ebc5e
--- /dev/null
+++ b/pkg/e2e/compose_environment_test.go
@@ -0,0 +1,245 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestEnvPriority(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("up", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "env-compose-priority")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose-with-env.yaml",
+ "up", "-d", "--build")
+ })
+
+ // Full options activated
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From OS Environment)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("compose file priority", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose-with-env.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ cmd.Env = append(cmd.Env, "WHEREAMI=shell")
+ res := icmd.RunCmd(cmd)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "shell")
+ })
+
+ // Full options activated
+ // 1. Command Line (docker compose run --env ) <-- Result expected
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("compose file priority", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose-with-env.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override",
+ "run", "--rm", "-e", "WHEREAMI=shell", "env-compose-priority")
+ res := icmd.RunCmd(cmd)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "shell")
+ })
+
+ // No Compose file, all other options
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From OS Environment)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("shell priority", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ cmd.Env = append(cmd.Env, "WHEREAMI=shell")
+ res := icmd.RunCmd(cmd)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "shell")
+ })
+
+ // No Compose file, all other options with env variable from OS environment
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("shell priority file with default value", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override.with.default",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ cmd.Env = append(cmd.Env, "WHEREAMI=shell")
+ res := icmd.RunCmd(cmd)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "shell")
+ })
+
+ // No Compose file, all other options with env variable from OS environment
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment default value from file in --env-file)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("shell priority implicitly set", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override.with.default",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ res := icmd.RunCmd(cmd)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "EnvFileDefaultValue")
+ })
+
+ // No Compose file, all other options with env variable from OS environment
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment default value from file in COMPOSE_ENV_FILES)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("shell priority from COMPOSE_ENV_FILES variable", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ cmd.Env = append(cmd.Env, "COMPOSE_ENV_FILES=./fixtures/environment/env-priority/.env.override.with.default")
+ res := icmd.RunCmd(cmd)
+ stdout := res.Stdout()
+ assert.Equal(t, strings.TrimSpace(stdout), "EnvFileDefaultValue")
+ })
+
+ // No Compose file and env variable pass to the run command
+ // 1. Command Line (docker compose run --env ) <-- Result expected
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("shell priority from run command", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override",
+ "run", "--rm", "-e", "WHEREAMI=shell-run", "env-compose-priority")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "shell-run")
+ })
+
+ // No Compose file & no env variable but override env file
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment patched by .env as a default --env-file value)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("override env file from compose", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose-with-env-file.yaml",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "Env File")
+ })
+
+ // No Compose file & no env variable but override by default env file
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment patched by --env-file value)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("override env file", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.override",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "override")
+ })
+
+ // No Compose file & no env variable but override env file
+ // 1. Command Line (docker compose run --env ) <-- Result expected (From environment patched by --env-file value)
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive
+ // 5. Variable is not defined
+ t.Run("env file", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "Env File")
+ })
+
+ // No Compose file & no env variable, using an empty override env file
+ // 1. Command Line (docker compose run --env )
+ // 2. Compose File (service::environment section)
+ // 3. Compose File (service::env_file section file)
+ // 4. Container Image ENV directive <-- Result expected
+ // 5. Variable is not defined
+ t.Run("use Dockerfile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-priority/compose.yaml",
+ "--env-file", "./fixtures/environment/env-priority/.env.empty",
+ "run", "--rm", "-e", "WHEREAMI", "env-compose-priority")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "Dockerfile")
+ })
+
+ t.Run("down", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "--project-name", "env-priority", "down")
+ })
+}
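
The precedence ladder repeated in the comments above can be summarised with a toy resolver; this is only an illustration of the documented order, not Compose's resolution code:

package main

import "fmt"

// resolve picks a value following the order documented in the comments above:
// 1. docker compose run --env, 2. service environment, 3. service env_file,
// 4. image ENV; otherwise the variable is unset. nil means "not set at that level".
func resolve(cliEnv, serviceEnv, envFile, imageEnv *string) (string, bool) {
	for _, v := range []*string{cliEnv, serviceEnv, envFile, imageEnv} {
		if v != nil {
			return *v, true
		}
	}
	return "", false
}

func main() {
	shell := "shell"
	dockerfile := "Dockerfile"
	v, _ := resolve(&shell, nil, nil, &dockerfile)
	fmt.Println(v) // "shell": the CLI value wins, as in the "shell priority" subtests
	v, _ = resolve(nil, nil, nil, &dockerfile)
	fmt.Println(v) // "Dockerfile": only the image ENV remains, as in "use Dockerfile"
}
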
+
+func TestEnvInterpolation(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("shell priority from run command", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/environment/env-interpolation/compose.yaml", "config")
+ cmd.Env = append(cmd.Env, "WHEREAMI=shell")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{Out: `IMAGE: default_env:shell`})
+ })
+
+ t.Run("shell priority from run command using default value fallback", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-interpolation-default-value/compose.yaml", "config").
+ Assert(t, icmd.Expected{Out: `IMAGE: default_env:EnvFileDefaultValue`})
+ })
+}
+
+func TestCommentsInEnvFile(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("comments in env files", func(t *testing.T) {
+ c.RunDockerOrExitError(t, "rmi", "env-file-comments")
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-file-comments/compose.yaml", "up", "-d", "--build")
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/env-file-comments/compose.yaml",
+ "run", "--rm", "-e", "COMMENT", "-e", "NO_COMMENT", "env-file-comments")
+
+ res.Assert(t, icmd.Expected{Out: `COMMENT=1234`})
+ res.Assert(t, icmd.Expected{Out: `NO_COMMENT=1234#5`})
+
+ c.RunDockerComposeCmd(t, "--project-name", "env-file-comments", "down", "--rmi", "all")
+ })
+}
+
+func TestUnsetEnv(t *testing.T) {
+ c := NewParallelCLI(t)
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", "empty-variable", "down", "--rmi", "all")
+ })
+
+ t.Run("override env variable", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/empty-variable/compose.yaml", "build")
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/empty-variable/compose.yaml",
+ "run", "-e", "EMPTY=hello", "--rm", "empty-variable")
+ res.Assert(t, icmd.Expected{Out: `=hello=`})
+ })
+
+ t.Run("unset env variable", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/environment/empty-variable/compose.yaml",
+ "run", "--rm", "empty-variable")
+ res.Assert(t, icmd.Expected{Out: `==`})
+ })
+}
diff --git a/pkg/e2e/compose_exec_test.go b/pkg/e2e/compose_exec_test.go
index 0a574456cf7..92b7e7ac74b 100644
--- a/pkg/e2e/compose_exec_test.go
+++ b/pkg/e2e/compose_exec_test.go
@@ -25,24 +25,35 @@ import (
)
func TestLocalComposeExec(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "compose-e2e-exec"
- c.RunDockerComposeCmd("--project-directory", "fixtures/simple-composefile", "--project-name", projectName, "up", "-d")
+ cmdArgs := func(cmd string, args ...string) []string {
+ ret := []string{"--project-directory", "fixtures/simple-composefile", "--project-name", projectName, cmd}
+ ret = append(ret, args...)
+ return ret
+ }
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, cmdArgs("down", "--timeout=0")...)
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ c.RunDockerComposeCmd(t, cmdArgs("up", "-d")...)
t.Run("exec true", func(t *testing.T) {
- res := c.RunDockerOrExitError("exec", "compose-e2e-exec-simple-1", "/bin/true")
- res.Assert(t, icmd.Expected{ExitCode: 0})
+ c.RunDockerComposeCmd(t, cmdArgs("exec", "simple", "/bin/true")...)
})
t.Run("exec false", func(t *testing.T) {
- res := c.RunDockerOrExitError("exec", "compose-e2e-exec-simple-1", "/bin/false")
+ res := c.RunDockerComposeCmdNoCheck(t, cmdArgs("exec", "simple", "/bin/false")...)
res.Assert(t, icmd.Expected{ExitCode: 1})
})
t.Run("exec with env set", func(t *testing.T) {
- res := icmd.RunCmd(c.NewDockerCmd("exec", "-e", "FOO", "compose-e2e-exec-simple-1", "/usr/bin/env"),
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, cmdArgs("exec", "-e", "FOO", "simple", "/usr/bin/env")...),
func(cmd *icmd.Cmd) {
cmd.Env = append(cmd.Env, "FOO=BAR")
})
@@ -50,8 +61,34 @@ func TestLocalComposeExec(t *testing.T) {
})
t.Run("exec without env set", func(t *testing.T) {
- res := c.RunDockerOrExitError("exec", "-e", "FOO", "compose-e2e-exec-simple-1", "/usr/bin/env")
- res.Assert(t, icmd.Expected{ExitCode: 0})
- assert.Check(t, !strings.Contains(res.Stdout(), "FOO="))
+ res := c.RunDockerComposeCmd(t, cmdArgs("exec", "-e", "FOO", "simple", "/usr/bin/env")...)
+ assert.Check(t, !strings.Contains(res.Stdout(), "FOO="), res.Combined())
+ })
+}
+
+func TestLocalComposeExecOneOff(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ const projectName = "compose-e2e-exec-one-off"
+ defer c.cleanupWithDown(t, projectName)
+ cmdArgs := func(cmd string, args ...string) []string {
+ ret := []string{"--project-directory", "fixtures/simple-composefile", "--project-name", projectName, cmd}
+ ret = append(ret, args...)
+ return ret
+ }
+
+ c.RunDockerComposeCmd(t, cmdArgs("run", "-d", "simple")...)
+
+ t.Run("exec in one-off container", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, cmdArgs("exec", "-e", "FOO", "simple", "/usr/bin/env")...)
+ assert.Check(t, !strings.Contains(res.Stdout(), "FOO="), res.Combined())
+ })
+
+ t.Run("exec with index", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, cmdArgs("exec", "--index", "1", "-e", "FOO", "simple", "/usr/bin/env")...)
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "service \"simple\" is not running container #1"})
})
+ cmdResult := c.RunDockerCmd(t, "ps", "-q", "--filter", "label=com.docker.compose.project=compose-e2e-exec-one-off").Stdout()
+ containerIDs := strings.Split(cmdResult, "\n")
+ _ = c.RunDockerOrExitError(t, append([]string{"stop"}, containerIDs...)...)
}
diff --git a/pkg/e2e/compose_run_build_once_test.go b/pkg/e2e/compose_run_build_once_test.go
new file mode 100644
index 00000000000..f9726bb3b31
--- /dev/null
+++ b/pkg/e2e/compose_run_build_once_test.go
@@ -0,0 +1,100 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "regexp"
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+// TestRunBuildOnce tests that services with pull_policy: build are only built once
+// when using 'docker compose run', even when they are dependencies.
+// This addresses a bug where dependencies were built twice: once in startDependencies
+// and once in ensureImagesExists.
+func TestRunBuildOnce(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("dependency with pull_policy build is built only once", func(t *testing.T) {
+ projectName := randomProjectName("build-once")
+ _ = c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once.yaml", "down", "--rmi", "local", "--remove-orphans", "-v")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once.yaml", "--verbose", "run", "--build", "--rm", "curl")
+
+ output := res.Stdout()
+
+ nginxBuilds := countServiceBuilds(output, projectName, "nginx")
+
+ assert.Equal(t, nginxBuilds, 1, "nginx should build once, built %d times\nOutput:\n%s", nginxBuilds, output)
+ assert.Assert(t, strings.Contains(res.Stdout(), "curl service"))
+
+ c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once.yaml", "down", "--remove-orphans")
+ })
+
+ t.Run("nested dependencies build only once each", func(t *testing.T) {
+ projectName := randomProjectName("build-nested")
+ _ = c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-nested.yaml", "down", "--rmi", "local", "--remove-orphans", "-v")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-nested.yaml", "--verbose", "run", "--build", "--rm", "app")
+
+ output := res.Stdout()
+
+ dbBuilds := countServiceBuilds(output, projectName, "db")
+ apiBuilds := countServiceBuilds(output, projectName, "api")
+ appBuilds := countServiceBuilds(output, projectName, "app")
+
+ assert.Equal(t, dbBuilds, 1, "db should build once, built %d times\nOutput:\n%s", dbBuilds, output)
+ assert.Equal(t, apiBuilds, 1, "api should build once, built %d times\nOutput:\n%s", apiBuilds, output)
+ assert.Equal(t, appBuilds, 1, "app should build once, built %d times\nOutput:\n%s", appBuilds, output)
+ assert.Assert(t, strings.Contains(output, "App running"))
+
+ c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-nested.yaml", "down", "--rmi", "local", "--remove-orphans", "-v")
+ })
+
+ t.Run("service with no dependencies builds once", func(t *testing.T) {
+ projectName := randomProjectName("build-simple")
+ _ = c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-no-deps.yaml", "down", "--rmi", "local", "--remove-orphans")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-no-deps.yaml", "run", "--build", "--rm", "simple")
+
+ output := res.Stdout()
+
+ simpleBuilds := countServiceBuilds(output, projectName, "simple")
+
+ assert.Equal(t, simpleBuilds, 1, "simple should build once, built %d times\nOutput:\n%s", simpleBuilds, output)
+ assert.Assert(t, strings.Contains(res.Stdout(), "Simple service"))
+
+ c.RunDockerComposeCmd(t, "-p", projectName, "-f", "./fixtures/run-test/build-once-no-deps.yaml", "down", "--remove-orphans")
+ })
+}
+
+// countServiceBuilds counts how many times a service was built by matching
+// the "naming to *{projectName}-{serviceName}* done" pattern in the output
+func countServiceBuilds(output, projectName, serviceName string) int {
+ pattern := regexp.MustCompile(`naming to .*` + regexp.QuoteMeta(projectName) + `-` + regexp.QuoteMeta(serviceName) + `.* done`)
+ return len(pattern.FindAllString(output, -1))
+}
+
+// randomProjectName generates a unique project name for parallel test execution
+// Format: prefix-<8 random hex chars> (e.g., "build-once-3f4a9b2c")
+func randomProjectName(prefix string) string {
+ b := make([]byte, 4) // 4 bytes = 8 hex chars
+ rand.Read(b) //nolint:errcheck
+ return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(b))
+}
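
To make the counting helper above concrete, a small standalone example; the BuildKit progress lines are made up for illustration, only the "naming to ... done" shape matters to the regex:

package main

import (
	"fmt"
	"regexp"
)

// countServiceBuilds mirrors the helper above: it counts "naming to ... done"
// progress lines that mention the <project>-<service> image name.
func countServiceBuilds(output, projectName, serviceName string) int {
	pattern := regexp.MustCompile(`naming to .*` + regexp.QuoteMeta(projectName) + `-` + regexp.QuoteMeta(serviceName) + `.* done`)
	return len(pattern.FindAllString(output, -1))
}

func main() {
	// Hypothetical build output for a project named build-once-3f4a9b2c.
	out := "#12 naming to docker.io/library/build-once-3f4a9b2c-nginx done\n" +
		"#15 naming to docker.io/library/build-once-3f4a9b2c-curl done\n"
	fmt.Println(countServiceBuilds(out, "build-once-3f4a9b2c", "nginx")) // 1
	fmt.Println(countServiceBuilds(out, "build-once-3f4a9b2c", "db"))   // 0
}
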
diff --git a/pkg/e2e/compose_run_test.go b/pkg/e2e/compose_run_test.go
index fdd5803d788..7ee2313aa2b 100644
--- a/pkg/e2e/compose_run_test.go
+++ b/pkg/e2e/compose_run_test.go
@@ -26,32 +26,34 @@ import (
)
func TestLocalComposeRun(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "run-test")
t.Run("compose run", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "back")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "back")
lines := Lines(res.Stdout())
assert.Equal(t, lines[len(lines)-1], "Hello there!!", res.Stdout())
assert.Assert(t, !strings.Contains(res.Combined(), "orphan"))
- res = c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo", "Hello one more time")
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo",
+ "Hello one more time")
lines = Lines(res.Stdout())
assert.Equal(t, lines[len(lines)-1], "Hello one more time", res.Stdout())
- assert.Assert(t, !strings.Contains(res.Combined(), "orphan"))
+ assert.Assert(t, strings.Contains(res.Combined(), "orphan"))
})
t.Run("check run container exited", func(t *testing.T) {
- res := c.RunDockerCmd("ps", "--all")
+ res := c.RunDockerCmd(t, "ps", "--all")
lines := Lines(res.Stdout())
var runContainerID string
var truncatedSlug string
for _, line := range lines {
fields := strings.Fields(line)
containerID := fields[len(fields)-1]
- assert.Assert(t, !strings.HasPrefix(containerID, "run-test_front"))
- if strings.HasPrefix(containerID, "run-test_back") {
+ assert.Assert(t, !strings.HasPrefix(containerID, "run-test-front"))
+ if strings.HasPrefix(containerID, "run-test-back") {
// only the one-off container for back service
- assert.Assert(t, strings.HasPrefix(containerID, "run-test_back_run_"), containerID)
- truncatedSlug = strings.Replace(containerID, "run-test_back_run_", "", 1)
+ assert.Assert(t, strings.HasPrefix(containerID, "run-test-back-run-"), containerID)
+ truncatedSlug = strings.Replace(containerID, "run-test-back-run-", "", 1)
runContainerID = containerID
}
if strings.HasPrefix(containerID, "run-test-db-1") {
@@ -59,52 +61,62 @@ func TestLocalComposeRun(t *testing.T) {
}
}
assert.Assert(t, runContainerID != "")
- res = c.RunDockerCmd("inspect", runContainerID)
+ res = c.RunDockerCmd(t, "inspect", runContainerID)
res.Assert(t, icmd.Expected{Out: ` "Status": "exited"`})
- res.Assert(t, icmd.Expected{Out: `"com.docker.compose.container-number": "1"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.project": "run-test"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.oneoff": "True",`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.slug": "` + truncatedSlug})
})
t.Run("compose run --rm", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "--rm", "back", "echo", "Hello again")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "--rm", "back", "echo",
+ "Hello again")
lines := Lines(res.Stdout())
assert.Equal(t, lines[len(lines)-1], "Hello again", res.Stdout())
- res = c.RunDockerCmd("ps", "--all")
- assert.Assert(t, strings.Contains(res.Stdout(), "run-test_back"), res.Stdout())
+ res = c.RunDockerCmd(t, "ps", "--all")
+ assert.Assert(t, strings.Contains(res.Stdout(), "run-test-back"), res.Stdout())
})
t.Run("down", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "down")
- res := c.RunDockerCmd("ps", "--all")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "down", "--remove-orphans")
+ res := c.RunDockerCmd(t, "ps", "--all")
assert.Assert(t, !strings.Contains(res.Stdout(), "run-test"), res.Stdout())
})
t.Run("compose run --volumes", func(t *testing.T) {
wd, err := os.Getwd()
assert.NilError(t, err)
- res := c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "--volumes", wd+":/foo", "back", "/bin/sh", "-c", "ls /foo")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "--volumes", wd+":/foo",
+ "back", "/bin/sh", "-c", "ls /foo")
res.Assert(t, icmd.Expected{Out: "compose_run_test.go"})
- res = c.RunDockerCmd("ps", "--all")
- assert.Assert(t, strings.Contains(res.Stdout(), "run-test_back"), res.Stdout())
+ res = c.RunDockerCmd(t, "ps", "--all")
+ assert.Assert(t, strings.Contains(res.Stdout(), "run-test-back"), res.Stdout())
})
t.Run("compose run --publish", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "--publish", "8081:80", "-d", "back", "/bin/sh", "-c", "sleep 1")
- res := c.RunDockerCmd("ps")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/ports.yaml", "run", "--publish", "8081:80", "-d", "back",
+ "/bin/sh", "-c", "sleep 1")
+ res := c.RunDockerCmd(t, "ps")
assert.Assert(t, strings.Contains(res.Stdout(), "8081->80/tcp"), res.Stdout())
+ assert.Assert(t, !strings.Contains(res.Stdout(), "8082->80/tcp"), res.Stdout())
+ })
+
+ t.Run("compose run --service-ports", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/ports.yaml", "run", "--service-ports", "-d", "back",
+ "/bin/sh", "-c", "sleep 1")
+ res := c.RunDockerCmd(t, "ps")
+ assert.Assert(t, strings.Contains(res.Stdout(), "8082->80/tcp"), res.Stdout())
})
t.Run("compose run orphan", func(t *testing.T) {
// Use different compose files to get an orphan container
- c.RunDockerComposeCmd("-f", "./fixtures/run-test/orphan.yaml", "run", "simple")
- res := c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo", "Hello")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/orphan.yaml", "run", "simple")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo", "Hello")
assert.Assert(t, strings.Contains(res.Combined(), "orphan"))
- cmd := c.NewDockerCmd("compose", "-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo", "Hello")
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "back", "echo", "Hello")
res = icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
cmd.Env = append(cmd.Env, "COMPOSE_IGNORE_ORPHANS=True")
})
@@ -112,9 +124,158 @@ func TestLocalComposeRun(t *testing.T) {
})
t.Run("down", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/run-test/compose.yaml", "down")
- c.RunDockerComposeCmd("-f", "./fixtures/run-test/orphan.yaml", "down")
- res := c.RunDockerCmd("ps", "--all")
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "down")
+ icmd.RunCmd(cmd, func(c *icmd.Cmd) {
+ c.Env = append(c.Env, "COMPOSE_REMOVE_ORPHANS=True")
+ })
+ res := c.RunDockerCmd(t, "ps", "--all")
+
assert.Assert(t, !strings.Contains(res.Stdout(), "run-test"), res.Stdout())
})
+
+ t.Run("run starts only container and dependencies", func(t *testing.T) {
+ // ensure that, even if another service is already up, run does not start it: https://github.com/docker/compose/issues/9459
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "up", "service_b", "--menu=false")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "run", "service_a")
+ assert.Assert(t, strings.Contains(res.Combined(), "shared_dep"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "service_b"), res.Combined())
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "down", "--remove-orphans")
+ })
+
+ t.Run("run without dependencies", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "run", "--no-deps", "service_a")
+ assert.Assert(t, !strings.Contains(res.Combined(), "shared_dep"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "service_b"), res.Combined())
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "down", "--remove-orphans")
+ })
+
+ t.Run("run with not required dependency", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "run", "foo")
+ assert.Assert(t, strings.Contains(res.Combined(), "foo"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "bar"), res.Combined())
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "down", "--remove-orphans")
+ })
+
+ t.Run("--quiet-pull", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/quiet-pull.yaml", "down", "--remove-orphans", "--rmi", "all")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/quiet-pull.yaml", "run", "--quiet-pull", "backend")
+ assert.Assert(t, !strings.Contains(res.Combined(), "Pull complete"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Pulled"), res.Combined())
+ })
+
+ t.Run("COMPOSE_PROGRESS quiet", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/quiet-pull.yaml", "down", "--remove-orphans", "--rmi", "all")
+ res.Assert(t, icmd.Success)
+
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/run-test/quiet-pull.yaml", "run", "backend")
+ res = icmd.RunCmd(cmd, func(c *icmd.Cmd) {
+ c.Env = append(c.Env, "COMPOSE_PROGRESS=quiet")
+ })
+ assert.Assert(t, !strings.Contains(res.Combined(), "Pull complete"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "Pulled"), res.Combined())
+ })
+
+ t.Run("--pull", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/pull.yaml", "down", "--remove-orphans", "--rmi", "all")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/pull.yaml", "run", "--pull", "always", "backend")
+ assert.Assert(t, strings.Contains(res.Combined(), "Image nginx Pulling"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Image nginx Pulled"), res.Combined())
+ })
+
+ t.Run("compose run --env-from-file", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "--env-from-file", "./fixtures/run-test/run.env",
+ "front", "env")
+ res.Assert(t, icmd.Expected{Out: "FOO=BAR"})
+ })
+
+ t.Run("compose run -rm with stop signal", func(t *testing.T) {
+ projectName := "run-test"
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "-f", "./fixtures/ps-test/compose.yaml", "run", "--rm", "-d", "nginx")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerCmd(t, "ps", "--quiet", "--filter", "name=run-test-nginx")
+ containerID := strings.TrimSpace(res.Stdout())
+
+ res = c.RunDockerCmd(t, "stop", containerID)
+ res.Assert(t, icmd.Success)
+ res = c.RunDockerCmd(t, "ps", "--all", "--filter", "name=run-test-nginx", "--format", "'{{.Names}}'")
+ assert.Assert(t, !strings.Contains(res.Stdout(), "run-test-nginx"), res.Stdout())
+ })
+
+ t.Run("compose run --env", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "--env", "FOO=BAR",
+ "front", "env")
+ res.Assert(t, icmd.Expected{Out: "FOO=BAR"})
+ })
+
+ t.Run("compose run --build", func(t *testing.T) {
+ c.cleanupWithDown(t, "run-test", "--rmi=local")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/compose.yaml", "run", "build", "echo", "hello world")
+ res.Assert(t, icmd.Expected{Out: "hello world"})
+ })
+
+ t.Run("compose run with piped input detection", func(t *testing.T) {
+ if composeStandaloneMode {
+ t.Skip("Skipping test compose with piped input detection in standalone mode")
+ }
+ // Test that piped input is properly detected and TTY is automatically disabled
+ // This tests the logic added in run.go that checks dockerCli.In().IsTerminal()
+ cmd := c.NewCmd("sh", "-c", "echo 'piped-content' | docker compose -f ./fixtures/run-test/piped-test.yaml run --rm piped-test")
+ res := icmd.RunCmd(cmd)
+
+ res.Assert(t, icmd.Expected{Out: "piped-content"})
+ res.Assert(t, icmd.Success)
+ })
+
+ t.Run("compose run piped input should not allocate TTY", func(t *testing.T) {
+ if composeStandaloneMode {
+ t.Skip("Skipping test compose with piped input detection in standalone mode")
+ }
+ // Test that when stdin is piped, the container correctly detects no TTY
+ // This verifies that the automatic noTty=true setting works correctly
+ cmd := c.NewCmd("sh", "-c", "echo '' | docker compose -f ./fixtures/run-test/piped-test.yaml run --rm tty-test")
+ res := icmd.RunCmd(cmd)
+
+ res.Assert(t, icmd.Expected{Out: "No TTY detected"})
+ res.Assert(t, icmd.Success)
+ })
+
+ t.Run("compose run piped input with explicit --tty should fail", func(t *testing.T) {
+ if composeStandaloneMode {
+ t.Skip("Skipping test compose with piped input detection in standalone mode")
+ }
+ // Test that explicitly requesting TTY with piped input fails with proper error message
+ // This should trigger the "input device is not a TTY" error
+ cmd := c.NewCmd("sh", "-c", "echo 'test' | docker compose -f ./fixtures/run-test/piped-test.yaml run --rm --tty piped-test")
+ res := icmd.RunCmd(cmd)
+
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "the input device is not a TTY",
+ })
+ })
+
+ t.Run("compose run piped input with --no-TTY=false should fail", func(t *testing.T) {
+ if composeStandaloneMode {
+ t.Skip("Skipping test compose with piped input detection in standalone mode")
+ }
+ // Test that explicitly disabling --no-TTY (i.e., requesting TTY) with piped input fails
+ // This should also trigger the "input device is not a TTY" error
+ cmd := c.NewCmd("sh", "-c", "echo 'test' | docker compose -f ./fixtures/run-test/piped-test.yaml run --rm --no-TTY=false piped-test")
+ res := icmd.RunCmd(cmd)
+
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "the input device is not a TTY",
+ })
+ })
}
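
The three piped-input subtests above hinge on one decision: if stdin is not a terminal, TTY allocation is disabled automatically, and explicitly asking for a TTY becomes an error. A toy sketch of that rule under those assumptions (an illustration only, not the code in run.go):

package main

import (
	"errors"
	"fmt"
)

// resolveTTY models the behaviour the piped-input tests assert: with piped
// stdin a TTY is never allocated, and explicitly requesting one (--tty or
// --no-TTY=false) fails with "the input device is not a TTY".
func resolveTTY(requestTTY, stdinIsTerminal bool) (bool, error) {
	if !stdinIsTerminal {
		if requestTTY {
			return false, errors.New("the input device is not a TTY")
		}
		return false, nil // TTY silently disabled for piped input
	}
	return requestTTY, nil
}

func main() {
	useTTY, err := resolveTTY(false, false)
	fmt.Println(useTTY, err) // false <nil>: piped input, no TTY requested
	_, err = resolveTTY(true, false)
	fmt.Println(err) // the input device is not a TTY
}
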
diff --git a/pkg/e2e/compose_test.go b/pkg/e2e/compose_test.go
index d3497b7d95d..3b5a7341a70 100644
--- a/pkg/e2e/compose_test.go
+++ b/pkg/e2e/compose_test.go
@@ -18,7 +18,6 @@ package e2e
import (
"fmt"
- "io/ioutil"
"net/http"
"os"
"path/filepath"
@@ -31,31 +30,30 @@ import (
"gotest.tools/v3/icmd"
)
-var binDir string
-
func TestLocalComposeUp(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ // this test shares a fixture with TestCompatibility and can't run at the same time
+ c := NewCLI(t)
const projectName = "compose-e2e-demo"
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/sentences/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/sentences/compose.yaml", "--project-name", projectName, "up", "-d")
})
t.Run("check accessing running app", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "ps")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "ps")
res.Assert(t, icmd.Expected{Out: `web`})
endpoint := "http://localhost:90/"
output := HTTPGetWithRetry(t, endpoint+"/words/noun", http.StatusOK, 2*time.Second, 20*time.Second)
assert.Assert(t, strings.Contains(output, `"word":`))
- res = c.RunDockerCmd("network", "ls")
+ res = c.RunDockerCmd(t, "network", "ls")
res.Assert(t, icmd.Expected{Out: projectName + "_default"})
})
t.Run("top", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "top")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "top")
output := res.Stdout()
head := []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}
for _, h := range head {
@@ -66,7 +64,7 @@ func TestLocalComposeUp(t *testing.T) {
})
t.Run("check compose labels", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", projectName+"-web-1")
+ res := c.RunDockerCmd(t, "inspect", projectName+"-web-1")
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.container-number": "1"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.project": "compose-e2e-demo"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.oneoff": "False",`})
@@ -76,159 +74,177 @@ func TestLocalComposeUp(t *testing.T) {
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.service": "web"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.version":`})
- res = c.RunDockerCmd("network", "inspect", projectName+"_default")
+ res = c.RunDockerCmd(t, "network", "inspect", projectName+"_default")
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.network": "default"`})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.project": `})
res.Assert(t, icmd.Expected{Out: `"com.docker.compose.version": `})
})
t.Run("check user labels", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", projectName+"-web-1")
+ res := c.RunDockerCmd(t, "inspect", projectName+"-web-1")
res.Assert(t, icmd.Expected{Out: `"my-label": "test"`})
-
})
t.Run("check healthcheck output", func(t *testing.T) {
- c.WaitForCmdResult(c.NewDockerCmd("compose", "-p", projectName, "ps", "--format", "json"),
- StdoutContains(`"Name":"compose-e2e-demo-web-1","Command":"/dispatcher","Project":"compose-e2e-demo","Service":"web","State":"running","Health":"healthy"`),
+ c.WaitForCmdResult(t, c.NewDockerComposeCmd(t, "-p", projectName, "ps", "--format", "json"),
+ IsHealthy(projectName+"-web-1"),
5*time.Second, 1*time.Second)
- res := c.RunDockerComposeCmd("-p", projectName, "ps")
- res.Assert(t, icmd.Expected{Out: `NAME COMMAND SERVICE STATUS PORTS`})
- res.Assert(t, icmd.Expected{Out: `compose-e2e-demo-web-1 "/dispatcher" web running (healthy) 0.0.0.0:90->80/tcp, :::90->80/tcp`})
- res.Assert(t, icmd.Expected{Out: `compose-e2e-demo-db-1 "docker-entrypoint.s…" db running 5432/tcp`})
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ assertServiceStatus(t, projectName, "web", "(healthy)", res.Stdout())
})
t.Run("images", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "images")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "images")
res.Assert(t, icmd.Expected{Out: `compose-e2e-demo-db-1 gtardif/sentences-db latest`})
res.Assert(t, icmd.Expected{Out: `compose-e2e-demo-web-1 gtardif/sentences-web latest`})
res.Assert(t, icmd.Expected{Out: `compose-e2e-demo-words-1 gtardif/sentences-api latest`})
})
+ t.Run("down SERVICE", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "web")
+
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), "compose-e2e-demo-web-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "compose-e2e-demo-db-1"), res.Combined())
+ })
+
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
t.Run("check containers after down", func(t *testing.T) {
- res := c.RunDockerCmd("ps", "--all")
+ res := c.RunDockerCmd(t, "ps", "--all")
assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
})
t.Run("check networks after down", func(t *testing.T) {
- res := c.RunDockerCmd("network", "ls")
+ res := c.RunDockerCmd(t, "network", "ls")
assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
})
}
-func TestComposePull(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
-
- res := c.RunDockerOrExitError("compose", "--project-directory", "fixtures/simple-composefile", "pull")
- output := res.Combined()
-
- assert.Assert(t, strings.Contains(output, "simple Pulled"))
- assert.Assert(t, strings.Contains(output, "another Pulled"))
-}
-
func TestDownComposefileInParentFolder(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
- tmpFolder, err := ioutil.TempDir("fixtures/simple-composefile", "test-tmp")
+ tmpFolder, err := os.MkdirTemp("fixtures/simple-composefile", "test-tmp")
assert.NilError(t, err)
- defer os.Remove(tmpFolder) // nolint: errcheck
+ defer os.Remove(tmpFolder) //nolint:errcheck
projectName := filepath.Base(tmpFolder)
- res := c.RunDockerComposeCmd("--project-directory", tmpFolder, "up", "-d")
+ res := c.RunDockerComposeCmd(t, "--project-directory", tmpFolder, "up", "-d")
res.Assert(t, icmd.Expected{Err: "Started", ExitCode: 0})
- res = c.RunDockerComposeCmd("-p", projectName, "down")
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "down")
res.Assert(t, icmd.Expected{Err: "Removed", ExitCode: 0})
}
func TestAttachRestart(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ t.Skip("Skipping test until we can fix it")
- cmd := c.NewDockerCmd("compose", "--ansi=never", "--project-directory", "./fixtures/attach-restart", "up")
+ if _, ok := os.LookupEnv("CI"); ok {
+ t.Skip("Skipping test on CI... flaky")
+ }
+ c := NewParallelCLI(t)
+
+ cmd := c.NewDockerComposeCmd(t, "--ansi=never", "--project-directory", "./fixtures/attach-restart", "up")
res := icmd.StartCmd(cmd)
- defer c.RunDockerOrExitError("compose", "-p", "attach-restart", "down")
+ defer c.RunDockerComposeCmd(t, "-p", "attach-restart", "down")
- c.WaitForCondition(func() (bool, string) {
+ c.WaitForCondition(t, func() (bool, string) {
debug := res.Combined()
- return strings.Count(res.Stdout(), "failing-1 exited with code 1") == 3, fmt.Sprintf("'failing-1 exited with code 1' not found 3 times in : \n%s\n", debug)
- }, 2*time.Minute, 2*time.Second)
+ return strings.Count(res.Stdout(),
+ "failing-1 exited with code 1") == 3, fmt.Sprintf("'failing-1 exited with code 1' not found 3 times in : \n%s\n",
+ debug)
+ }, 4*time.Minute, 2*time.Second)
assert.Equal(t, strings.Count(res.Stdout(), "failing-1 | world"), 3, res.Combined())
}
func TestInitContainer(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
- res := c.RunDockerOrExitError("compose", "--ansi=never", "--project-directory", "./fixtures/init-container", "up")
- defer c.RunDockerOrExitError("compose", "-p", "init-container", "down")
+ res := c.RunDockerComposeCmd(t, "--ansi=never", "--project-directory", "./fixtures/init-container", "up", "--menu=false")
+ defer c.RunDockerComposeCmd(t, "-p", "init-container", "down")
testify.Regexp(t, "foo-1 | hello(?m:.*)bar-1 | world", res.Stdout())
}
func TestRm(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "compose-e2e-rm"
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "up", "-d")
})
- t.Run("rm -sf", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "rm", "-sf", "simple")
+ t.Run("rm --stop --force simple", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "rm",
+ "--stop", "--force", "simple")
res.Assert(t, icmd.Expected{Err: "Removed", ExitCode: 0})
})
- t.Run("check containers after rm -sf", func(t *testing.T) {
- res := c.RunDockerCmd("ps", "--all")
- assert.Assert(t, !strings.Contains(res.Combined(), projectName+"_simple"), res.Combined())
+ t.Run("check containers after rm", func(t *testing.T) {
+ res := c.RunDockerCmd(t, "ps", "--all")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName+"-simple"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), projectName+"-another"), res.Combined())
+ })
+
+ t.Run("up (again)", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "up", "-d")
})
- t.Run("rm -sf ", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "rm", "-sf", "simple")
+ t.Run("rm ---stop --force ", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/compose.yaml", "-p", projectName, "rm",
+ "--stop", "--force")
res.Assert(t, icmd.Expected{ExitCode: 0})
})
+ t.Run("check containers after rm", func(t *testing.T) {
+ res := c.RunDockerCmd(t, "ps", "--all")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName+"-simple"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName+"-another"), res.Combined())
+ })
+
t.Run("down", func(t *testing.T) {
- c.RunDockerComposeCmd("-p", projectName, "down")
+ c.RunDockerComposeCmd(t, "-p", projectName, "down")
})
}
func TestCompatibility(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ // this test shares a fixture with TestLocalComposeUp and can't run at the same time
+ c := NewCLI(t)
const projectName = "compose-e2e-compatibility"
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("--compatibility", "-f", "./fixtures/sentences/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "--compatibility", "-f", "./fixtures/sentences/compose.yaml", "--project-name",
+ projectName, "up", "-d")
})
t.Run("check container names", func(t *testing.T) {
- res := c.RunDockerCmd("ps", "--format", "{{.Names}}")
+ res := c.RunDockerCmd(t, "ps", "--format", "{{.Names}}")
res.Assert(t, icmd.Expected{Out: "compose-e2e-compatibility_web_1"})
res.Assert(t, icmd.Expected{Out: "compose-e2e-compatibility_words_1"})
res.Assert(t, icmd.Expected{Out: "compose-e2e-compatibility_db_1"})
})
t.Run("down", func(t *testing.T) {
- c.RunDockerComposeCmd("-p", projectName, "down")
+ c.RunDockerComposeCmd(t, "-p", projectName, "down")
})
}
-func TestConvert(t *testing.T) {
- const projectName = "compose-e2e-convert"
- c := NewParallelE2eCLI(t, binDir)
+func TestConfig(t *testing.T) {
+ const projectName = "compose-e2e-config"
+ c := NewParallelCLI(t)
wd, err := os.Getwd()
assert.NilError(t, err)
t.Run("up", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/simple-build-test/compose.yaml", "-p", projectName, "convert")
- res.Assert(t, icmd.Expected{Out: fmt.Sprintf(`services:
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-build-test/compose.yaml", "-p", projectName, "config")
+ res.Assert(t, icmd.Expected{Out: fmt.Sprintf(`name: %s
+services:
nginx:
build:
context: %s
@@ -237,6 +253,156 @@ func TestConvert(t *testing.T) {
default: null
networks:
default:
- name: compose-e2e-convert_default`, filepath.Join(wd, "fixtures", "simple-build-test", "nginx-build")), ExitCode: 0})
+ name: compose-e2e-config_default
+`, projectName, filepath.Join(wd, "fixtures", "simple-build-test", "nginx-build")), ExitCode: 0})
+ })
+}
+
+func TestConfigInterpolate(t *testing.T) {
+ const projectName = "compose-e2e-config-interpolate"
+ c := NewParallelCLI(t)
+
+ wd, err := os.Getwd()
+ assert.NilError(t, err)
+
+ t.Run("config", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-build-test/compose-interpolate.yaml", "-p", projectName, "config", "--no-interpolate")
+ res.Assert(t, icmd.Expected{Out: fmt.Sprintf(`name: %s
+networks:
+ default:
+ name: compose-e2e-config-interpolate_default
+services:
+ nginx:
+ build:
+ context: %s
+ dockerfile: ${MYVAR}
+ networks:
+ default: null
+`, projectName, filepath.Join(wd, "fixtures", "simple-build-test", "nginx-build")), ExitCode: 0})
+ })
+}
+
+func TestStopWithDependenciesAttached(t *testing.T) {
+ const projectName = "compose-e2e-stop-with-deps"
+ c := NewParallelCLI(t, WithEnv("COMMAND=echo hello"))
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "-p", projectName, "down", "--remove-orphans", "--timeout=0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
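+ // with --attach-dependencies, the one-shot dependency's output and exit status show up in the attached logs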
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "-p", projectName, "up", "--attach-dependencies", "foo", "--menu=false")
+ res.Assert(t, icmd.Expected{Out: "exited with code 0"})
+}
+
+func TestRemoveOrphaned(t *testing.T) {
+ const projectName = "compose-e2e-remove-orphaned"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "-p", projectName, "down", "--remove-orphans", "--timeout=0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ // run stack
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/sentences/compose.yaml", "-p", projectName, "up", "-d")
+
+ // down "web" service with orphaned removed
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/sentences/compose.yaml", "-p", projectName, "down", "--remove-orphans", "web")
+
+ // check "words" service has not been considered orphaned
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/sentences/compose.yaml", "-p", projectName, "ps", "--format", "{{.Name}}")
+ res.Assert(t, icmd.Expected{Out: fmt.Sprintf("%s-words-1", projectName)})
+}
+
+func TestComposeFileSetByDotEnv(t *testing.T) {
+ c := NewCLI(t)
+ defer c.cleanupWithDown(t, "dotenv")
+
+ cmd := c.NewDockerComposeCmd(t, "config")
+ cmd.Dir = filepath.Join(".", "fixtures", "dotenv")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{
+ ExitCode: 0,
+ Out: "image: test:latest",
})
+ res.Assert(t, icmd.Expected{
+ Out: "image: enabled:profile",
+ })
+}
+
+func TestComposeFileSetByProjectDirectory(t *testing.T) {
+ c := NewCLI(t)
+ defer c.cleanupWithDown(t, "dotenv")
+
+ dir := filepath.Join(".", "fixtures", "dotenv", "development")
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", dir, "config")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{
+ ExitCode: 0,
+ Out: "image: backend:latest",
+ })
+}
+
+func TestComposeFileSetByEnvFile(t *testing.T) {
+ c := NewCLI(t)
+ defer c.cleanupWithDown(t, "dotenv")
+
+ dotEnv, err := os.CreateTemp(t.TempDir(), ".env")
+ assert.NilError(t, err)
+ err = os.WriteFile(dotEnv.Name(), []byte(`
+COMPOSE_FILE=fixtures/dotenv/development/compose.yaml
+IMAGE_NAME=test
+IMAGE_TAG=latest
+COMPOSE_PROFILES=test
+`), 0o700)
+ assert.NilError(t, err)
+
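+ // COMPOSE_FILE and COMPOSE_PROFILES set in the file passed via --env-file select the compose file and active profiles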
+ cmd := c.NewDockerComposeCmd(t, "--env-file", dotEnv.Name(), "config")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{
+ Out: "image: test:latest",
+ })
+ res.Assert(t, icmd.Expected{
+ Out: "image: enabled:profile",
+ })
+}
+
+func TestNestedDotEnv(t *testing.T) {
+ c := NewCLI(t)
+ defer c.cleanupWithDown(t, "nested")
+
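+ // .env files are resolved from the working directory: running from "sub" picks up its own .env, whose values win over the root fixture's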
+ cmd := c.NewDockerComposeCmd(t, "run", "echo")
+ cmd.Dir = filepath.Join(".", "fixtures", "nested")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{
+ ExitCode: 0,
+ Out: "root win=root",
+ })
+
+ cmd = c.NewDockerComposeCmd(t, "run", "echo")
+ cmd.Dir = filepath.Join(".", "fixtures", "nested", "sub")
+ defer c.cleanupWithDown(t, "nested")
+ res = icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{
+ ExitCode: 0,
+ Out: "root sub win=sub",
+ })
+}
+
+func TestUnnecessaryResources(t *testing.T) {
+ const projectName = "compose-e2e-unnecessary-resources"
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, projectName)
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/external/compose.yaml", "-p", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "network foo_bar declared as external, but could not be found",
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/external/compose.yaml", "-p", projectName, "up", "-d", "test")
+ // Should not fail, as the missing external network is not used by the "test" service
}
diff --git a/pkg/e2e/compose_up_test.go b/pkg/e2e/compose_up_test.go
new file mode 100644
index 00000000000..b00cb3c1861
--- /dev/null
+++ b/pkg/e2e/compose_up_test.go
@@ -0,0 +1,114 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestUpWait(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-deps-wait"
+
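+ // "up --wait" only returns once services (including the one-shot dependency) are healthy or completed, so bound the call to catch hangs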
+ timeout := time.After(30 * time.Second)
+ done := make(chan bool)
+ go func() {
+ //nolint:nolintlint,testifylint // helper asserts inside goroutine; acceptable in this e2e test
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/dependencies/deps-completed-successfully.yaml", "--project-name", projectName, "up", "--wait", "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-deps-wait-oneshot-1"), res.Combined())
+ done <- true
+ }()
+
+ select {
+ case <-timeout:
+ t.Fatal("test did not finish in time")
+ case <-done:
+ break
+ }
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+}
+
+func TestUpExitCodeFrom(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-exit-code-from"
+
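+ // the "failure" service exits with status 42, which --exit-code-from propagates as compose's own exit code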
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start-fail/start-depends_on-long-lived.yaml", "--project-name", projectName, "up", "--menu=false", "--exit-code-from=failure", "failure")
+ res.Assert(t, icmd.Expected{ExitCode: 42})
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
+}
+
+func TestUpExitCodeFromContainerKilled(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-exit-code-from-kill"
+
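+ // 143 = 128 + SIGTERM: the long-lived "test" container is stopped rather than exiting on its own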
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start-fail/start-depends_on-long-lived.yaml", "--project-name", projectName, "up", "--menu=false", "--exit-code-from=test")
+ res.Assert(t, icmd.Expected{ExitCode: 143})
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
+}
+
+func TestPortRange(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-port-range"
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans", "--timeout=0")
+ }
+ reset()
+ t.Cleanup(reset)
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/port-range/compose.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Success)
+}
+
+func TestStdoutStderr(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-stdout-stderr"
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/stdout-stderr/compose.yaml", "--project-name", projectName, "up", "--menu=false")
+ res.Assert(t, icmd.Expected{Out: "log to stdout", Err: "log to stderr"})
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
+}
+
+func TestLoggingDriver(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "e2e-logging-driver"
+ defer c.cleanupWithDown(t, projectName)
+
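+ // on Docker Desktop the engine runs in a VM, so containers reach the host through host.docker.internal instead of 127.0.0.1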
+ host := "HOST=127.0.0.1"
+ res := c.RunDockerCmd(t, "info", "-f", "{{.OperatingSystem}}")
+ os := res.Stdout()
+ if strings.TrimSpace(os) == "Docker Desktop" {
+ host = "HOST=host.docker.internal"
+ }
+
+ cmd := c.NewDockerComposeCmd(t, "-f", "fixtures/logging-driver/compose.yaml", "--project-name", projectName, "up", "-d")
+ cmd.Env = append(cmd.Env, host, "BAR=foo")
+ icmd.RunCmd(cmd).Assert(t, icmd.Success)
+
+ cmd = c.NewDockerComposeCmd(t, "-f", "fixtures/logging-driver/compose.yaml", "--project-name", projectName, "up", "-d")
+ cmd.Env = append(cmd.Env, host, "BAR=zot")
+ icmd.RunCmd(cmd).Assert(t, icmd.Success)
+}
diff --git a/pkg/e2e/config_test.go b/pkg/e2e/config_test.go
new file mode 100644
index 00000000000..15d3e3d932a
--- /dev/null
+++ b/pkg/e2e/config_test.go
@@ -0,0 +1,70 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestLocalComposeConfig(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ const projectName = "compose-e2e-config"
+
+ t.Run("yaml", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/config/compose.yaml", "--project-name", projectName, "config")
+ res.Assert(t, icmd.Expected{Out: `
+ ports:
+ - mode: ingress
+ target: 80
+ published: "8080"
+ protocol: tcp`})
+ })
+
+ t.Run("json", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/config/compose.yaml", "--project-name", projectName, "config", "--format", "json")
+ res.Assert(t, icmd.Expected{Out: `"published": "8080"`})
+ })
+
+ t.Run("--no-interpolate", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/config/compose.yaml", "--project-name", projectName, "config", "--no-interpolate")
+ res.Assert(t, icmd.Expected{Out: `- ${PORT:-8080}:80`})
+ })
+
+ t.Run("--variables --format json", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/config/compose.yaml", "--project-name", projectName, "config", "--variables", "--format", "json")
+ res.Assert(t, icmd.Expected{Out: `{
+ "PORT": {
+ "Name": "PORT",
+ "DefaultValue": "8080",
+ "PresenceValue": "",
+ "Required": false
+ }
+}`})
+ })
+
+ t.Run("--variables --format yaml", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/config/compose.yaml", "--project-name", projectName, "config", "--variables", "--format", "yaml")
+ res.Assert(t, icmd.Expected{Out: `PORT:
+ name: PORT
+ defaultvalue: "8080"
+ presencevalue: ""
+ required: false`})
+ })
+}
diff --git a/pkg/e2e/configs_test.go b/pkg/e2e/configs_test.go
new file mode 100644
index 00000000000..c7d86f1c765
--- /dev/null
+++ b/pkg/e2e/configs_test.go
@@ -0,0 +1,57 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestConfigFromEnv(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "configs")
+
+ t.Run("config from file", func(t *testing.T) {
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/configs/compose.yaml", "run", "from_file"))
+ res.Assert(t, icmd.Expected{Out: "This is my config file"})
+ })
+
+ t.Run("config from env", func(t *testing.T) {
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/configs/compose.yaml", "run", "from_env"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "CONFIG=config")
+ })
+ res.Assert(t, icmd.Expected{Out: "config"})
+ })
+
+ t.Run("config inlined", func(t *testing.T) {
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/configs/compose.yaml", "run", "inlined"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "CONFIG=config")
+ })
+ res.Assert(t, icmd.Expected{Out: "This is my config"})
+ })
+
+ t.Run("custom target", func(t *testing.T) {
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/configs/compose.yaml", "run", "target"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "CONFIG=config")
+ })
+ res.Assert(t, icmd.Expected{Out: "This is my config"})
+ })
+}
diff --git a/pkg/e2e/container_name_test.go b/pkg/e2e/container_name_test.go
new file mode 100644
index 00000000000..1ddc489a196
--- /dev/null
+++ b/pkg/e2e/container_name_test.go
@@ -0,0 +1,44 @@
+//go:build !windows
+// +build !windows
+
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestUpContainerNameConflict(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-container_name_conflict"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
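+ // bringing up the whole project fails: both services declare the same fixed container_name, so only one of them can run at a time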
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/container_name/compose.yaml", "--project-name", projectName, "up")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: `container name "test" is already in use`})
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "-f", "fixtures/container_name/compose.yaml", "--project-name", projectName, "up", "test")
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "-f", "fixtures/container_name/compose.yaml", "--project-name", projectName, "up", "another_test")
+}
diff --git a/pkg/e2e/cp_test.go b/pkg/e2e/cp_test.go
index 661e5bee112..4b58c8b6d26 100644
--- a/pkg/e2e/cp_test.go
+++ b/pkg/e2e/cp_test.go
@@ -26,12 +26,12 @@ import (
)
func TestCopy(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "copy_e2e"
t.Cleanup(func() {
- c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "--project-name", projectName, "down")
os.Remove("./fixtures/cp-test/from-default.txt") //nolint:errcheck
os.Remove("./fixtures/cp-test/from-indexed.txt") //nolint:errcheck
@@ -39,52 +39,45 @@ func TestCopy(t *testing.T) {
})
t.Run("start service", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "--project-name", projectName, "up", "--scale", "nginx=5", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "--project-name", projectName, "up",
+ "--scale", "nginx=5", "-d")
})
t.Run("make sure service is running", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "ps")
- res.Assert(t, icmd.Expected{Out: `nginx running`})
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ assertServiceStatus(t, projectName, "nginx", "Up", res.Stdout())
})
- t.Run("copy to container copies the file to the first container by default", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "./fixtures/cp-test/cp-me.txt", "nginx:/tmp/default.txt")
+ t.Run("copy to container copies the file to the all containers by default", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp",
+ "./fixtures/cp-test/cp-me.txt", "nginx:/tmp/default.txt")
res.Assert(t, icmd.Expected{ExitCode: 0})
- output := c.RunDockerCmd("exec", projectName+"-nginx-1", "cat", "/tmp/default.txt").Stdout()
+ output := c.RunDockerCmd(t, "exec", projectName+"-nginx-1", "cat", "/tmp/default.txt").Stdout()
assert.Assert(t, strings.Contains(output, `hello world`), output)
- res = c.RunDockerOrExitError("exec", projectName+"_nginx_2", "cat", "/tmp/default.txt")
- res.Assert(t, icmd.Expected{ExitCode: 1})
- })
-
- t.Run("copy to container with a given index copies the file to the given container", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "--index=3", "./fixtures/cp-test/cp-me.txt", "nginx:/tmp/indexed.txt")
- res.Assert(t, icmd.Expected{ExitCode: 0})
-
- output := c.RunDockerCmd("exec", projectName+"-nginx-3", "cat", "/tmp/indexed.txt").Stdout()
+ output = c.RunDockerCmd(t, "exec", projectName+"-nginx-2", "cat", "/tmp/default.txt").Stdout()
assert.Assert(t, strings.Contains(output, `hello world`), output)
- res = c.RunDockerOrExitError("exec", projectName+"-nginx-2", "cat", "/tmp/indexed.txt")
- res.Assert(t, icmd.Expected{ExitCode: 1})
+ output = c.RunDockerCmd(t, "exec", projectName+"-nginx-3", "cat", "/tmp/default.txt").Stdout()
+ assert.Assert(t, strings.Contains(output, `hello world`), output)
})
- t.Run("copy to container with the all flag copies the file to all containers", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "--all", "./fixtures/cp-test/cp-me.txt", "nginx:/tmp/all.txt")
+ t.Run("copy to container with a given index copies the file to the given container", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "--index=3",
+ "./fixtures/cp-test/cp-me.txt", "nginx:/tmp/indexed.txt")
res.Assert(t, icmd.Expected{ExitCode: 0})
- output := c.RunDockerCmd("exec", projectName+"-nginx-1", "cat", "/tmp/all.txt").Stdout()
- assert.Assert(t, strings.Contains(output, `hello world`), output)
-
- output = c.RunDockerCmd("exec", projectName+"-nginx-2", "cat", "/tmp/all.txt").Stdout()
+ output := c.RunDockerCmd(t, "exec", projectName+"-nginx-3", "cat", "/tmp/indexed.txt").Stdout()
assert.Assert(t, strings.Contains(output, `hello world`), output)
- output = c.RunDockerCmd("exec", projectName+"-nginx-3", "cat", "/tmp/all.txt").Stdout()
- assert.Assert(t, strings.Contains(output, `hello world`), output)
+ res = c.RunDockerOrExitError(t, "exec", projectName+"-nginx-2", "cat", "/tmp/indexed.txt")
+ res.Assert(t, icmd.Expected{ExitCode: 1})
})
t.Run("copy from a container copies the file to the host from the first container by default", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "nginx:/tmp/default.txt", "./fixtures/cp-test/from-default.txt")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp",
+ "nginx:/tmp/default.txt", "./fixtures/cp-test/from-default.txt")
res.Assert(t, icmd.Expected{ExitCode: 0})
data, err := os.ReadFile("./fixtures/cp-test/from-default.txt")
@@ -93,7 +86,8 @@ func TestCopy(t *testing.T) {
})
t.Run("copy from a container with a given index copies the file to host", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "--index=3", "nginx:/tmp/indexed.txt", "./fixtures/cp-test/from-indexed.txt")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "--index=3",
+ "nginx:/tmp/indexed.txt", "./fixtures/cp-test/from-indexed.txt")
res.Assert(t, icmd.Expected{ExitCode: 0})
data, err := os.ReadFile("./fixtures/cp-test/from-indexed.txt")
@@ -102,13 +96,15 @@ func TestCopy(t *testing.T) {
})
t.Run("copy to and from a container also work with folder", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "./fixtures/cp-test/cp-folder", "nginx:/tmp")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp",
+ "./fixtures/cp-test/cp-folder", "nginx:/tmp")
res.Assert(t, icmd.Expected{ExitCode: 0})
- output := c.RunDockerCmd("exec", projectName+"-nginx-1", "cat", "/tmp/cp-folder/cp-me.txt").Stdout()
+ output := c.RunDockerCmd(t, "exec", projectName+"-nginx-1", "cat", "/tmp/cp-folder/cp-me.txt").Stdout()
assert.Assert(t, strings.Contains(output, `hello world from folder`), output)
- res = c.RunDockerComposeCmd("-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp", "nginx:/tmp/cp-folder", "./fixtures/cp-test/cp-folder2")
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/cp-test/compose.yaml", "-p", projectName, "cp",
+ "nginx:/tmp/cp-folder", "./fixtures/cp-test/cp-folder2")
res.Assert(t, icmd.Expected{ExitCode: 0})
data, err := os.ReadFile("./fixtures/cp-test/cp-folder2/cp-me.txt")
diff --git a/pkg/e2e/env_file_test.go b/pkg/e2e/env_file_test.go
new file mode 100644
index 00000000000..d9ea1d4c2b6
--- /dev/null
+++ b/pkg/e2e/env_file_test.go
@@ -0,0 +1,53 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestRawEnvFile(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "dotenv")
+
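+ // the raw value from the env file should be passed through verbatim, single quotes and JSON included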
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dotenv/raw.yaml", "run", "test")
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), "'{\"key\": \"value\"}'")
+}
+
+func TestUnusedMissingEnvFile(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "unused_dotenv")
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/env_file/compose.yaml", "up", "-d", "serviceA")
+
+ // Runtime operations should work even with a missing env file
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/env_file/compose.yaml", "ps")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/env_file/compose.yaml", "logs")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/env_file/compose.yaml", "down")
+}
+
+func TestRunEnvFile(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "run_dotenv")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "./fixtures/env_file", "run", "serviceC", "env")
+ res.Assert(t, icmd.Expected{Out: "FOO=BAR"})
+}
diff --git a/pkg/e2e/exec_test.go b/pkg/e2e/exec_test.go
new file mode 100644
index 00000000000..16643a0f8a0
--- /dev/null
+++ b/pkg/e2e/exec_test.go
@@ -0,0 +1,47 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestExec(t *testing.T) {
+ const projectName = "e2e-exec"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/exec/compose.yaml", "--project-name", projectName, "run", "-d", "test", "cat")
+
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-name", projectName, "exec", "--index=1", "test", "ps")
+ res.Assert(t, icmd.Expected{Err: "service \"test\" is not running container #1", ExitCode: 1})
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "exec", "test", "ps")
+ res.Assert(t, icmd.Expected{Out: "cat"}) // one-off container was selected
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/exec/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "exec", "test", "ps")
+ res.Assert(t, icmd.Expected{Out: "tail"}) // service container was selected
+}
diff --git a/pkg/e2e/export_test.go b/pkg/e2e/export_test.go
new file mode 100644
index 00000000000..baa0dc5b94c
--- /dev/null
+++ b/pkg/e2e/export_test.go
@@ -0,0 +1,50 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+)
+
+func TestExport(t *testing.T) {
+ const projectName = "e2e-export-service"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
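+ // export writes the service container's filesystem to the tar archive given with -o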
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/export/compose.yaml", "--project-name", projectName, "up", "-d", "service")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "export", "-o", "service.tar", "service")
+}
+
+func TestExportWithReplicas(t *testing.T) {
+ const projectName = "e2e-export-service-with-replicas"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/export/compose.yaml", "--project-name", projectName, "up", "-d", "service-with-replicas")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "export", "-o", "r1.tar", "--index=1", "service-with-replicas")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "export", "-o", "r2.tar", "--index=2", "service-with-replicas")
+}
diff --git a/pkg/e2e/fixtures/bridge/Dockerfile b/pkg/e2e/fixtures/bridge/Dockerfile
new file mode 100644
index 00000000000..4cdd9857779
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/Dockerfile
@@ -0,0 +1,18 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+ENV ENV_FROM_DOCKERFILE=1
+EXPOSE 8081
+CMD ["echo", "Hello from Dockerfile"]
diff --git a/pkg/e2e/fixtures/bridge/compose.yaml b/pkg/e2e/fixtures/bridge/compose.yaml
new file mode 100644
index 00000000000..4fbd9bd94cf
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/compose.yaml
@@ -0,0 +1,31 @@
+services:
+ serviceA:
+ image: alpine
+ build: .
+ ports:
+ - 80:8080
+ networks:
+ - private-network
+ configs:
+ - source: my-config
+ target: /etc/my-config1.txt
+ serviceB:
+ image: alpine
+ build: .
+ ports:
+ - 8081:8082
+ secrets:
+ - my-secrets
+ networks:
+ - private-network
+ - public-network
+configs:
+ my-config:
+ file: my-config.txt
+secrets:
+ my-secrets:
+ file: not-so-secret.txt
+networks:
+ private-network:
+ internal: true
+ public-network: {}
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/Chart.yaml b/pkg/e2e/fixtures/bridge/expected-helm/Chart.yaml
new file mode 100755
index 00000000000..44a00001138
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/Chart.yaml
@@ -0,0 +1,12 @@
+#! Chart.yaml
+apiVersion: v2
+name: bridge
+version: 0.0.1
+# kubeVersion: >= 1.29.1
+description: A generated Helm Chart for bridge generated via compose-bridge.
+type: application
+keywords:
+ - bridge
+appVersion: 'v0.0.1'
+sources:
+annotations:
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/0-bridge-namespace.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/0-bridge-namespace.yaml
new file mode 100755
index 00000000000..953ebe7bb12
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/0-bridge-namespace.yaml
@@ -0,0 +1,8 @@
+#! 0-bridge-namespace.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/bridge-configs.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/bridge-configs.yaml
new file mode 100755
index 00000000000..48e8b0cf6ac
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/bridge-configs.yaml
@@ -0,0 +1,12 @@
+#! bridge-configs.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.projectName }}
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+data:
+ my-config: |
+ My config file
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/my-secrets-secret.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/my-secrets-secret.yaml
new file mode 100755
index 00000000000..63659713ba7
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/my-secrets-secret.yaml
@@ -0,0 +1,13 @@
+#! my-secrets-secret.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Secret
+metadata:
+ name: my-secrets
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.secret: my-secrets
+data:
+ my-secrets: bm90LXNlY3JldA==
+type: Opaque
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/private-network-network-policy.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/private-network-network-policy.yaml
new file mode 100755
index 00000000000..0300049be68
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/private-network-network-policy.yaml
@@ -0,0 +1,24 @@
+#! private-network-network-policy.yaml
+# Generated code, do not edit
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: private-network-network-policy
+ namespace: {{ .Values.namespace }}
+spec:
+ podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/public-network-network-policy.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/public-network-network-policy.yaml
new file mode 100755
index 00000000000..da042b3e8c1
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/public-network-network-policy.yaml
@@ -0,0 +1,24 @@
+#! public-network-network-policy.yaml
+# Generated code, do not edit
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: public-network-network-policy
+ namespace: {{ .Values.namespace }}
+spec:
+ podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-deployment.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-deployment.yaml
new file mode 100755
index 00000000000..afef74e8bad
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-deployment.yaml
@@ -0,0 +1,49 @@
+#! serviceA-deployment.yaml
+# Generated code, do not edit
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: servicea
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: {{ .Values.deployment.defaultReplicas }}
+ selector:
+ matchLabels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ strategy:
+ type: {{ .Values.deployment.strategy }}
+ template:
+ metadata:
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ com.docker.compose.network.private-network: "true"
+ spec:
+ containers:
+ - name: servicea
+ image: {{ .Values.serviceA.image }}
+ imagePullPolicy: {{ .Values.serviceA.imagePullPolicy }}
+ resources:
+ limits:
+ cpu: {{ .Values.resources.defaultCpuLimit }}
+ memory: {{ .Values.resources.defaultMemoryLimit }}
+ ports:
+ - name: servicea-8080
+ containerPort: 8080
+ volumeMounts:
+ - name: etc-my-config1-txt
+ mountPath: /etc/my-config1.txt
+ subPath: my-config
+ readOnly: true
+ volumes:
+ - name: etc-my-config1-txt
+ configMap:
+ name: {{ .Values.projectName }}
+ items:
+ - key: my-config
+ path: my-config
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-expose.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-expose.yaml
new file mode 100755
index 00000000000..5d733bd2245
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-expose.yaml
@@ -0,0 +1,19 @@
+#! serviceA-expose.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: servicea
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ app.kubernetes.io/managed-by: Helm
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ ports:
+ - name: servicea-8080
+ port: 8080
+ targetPort: servicea-8080
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-service.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-service.yaml
new file mode 100755
index 00000000000..2138281ba93
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceA-service.yaml
@@ -0,0 +1,25 @@
+# check if there is at least one published port
+
+#! serviceA-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: servicea-published
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: {{ .Values.service.type }}
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ ports:
+ - name: servicea-80
+ port: 80
+ protocol: TCP
+ targetPort: servicea-8080
+
+# check if there is at least one published port
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-deployment.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-deployment.yaml
new file mode 100755
index 00000000000..7ea9d998f7f
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-deployment.yaml
@@ -0,0 +1,50 @@
+#! serviceB-deployment.yaml
+# Generated code, do not edit
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: serviceb
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: {{ .Values.deployment.defaultReplicas }}
+ selector:
+ matchLabels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ strategy:
+ type: {{ .Values.deployment.strategy }}
+ template:
+ metadata:
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ com.docker.compose.network.private-network: "true"
+ com.docker.compose.network.public-network: "true"
+ spec:
+ containers:
+ - name: serviceb
+ image: {{ .Values.serviceB.image }}
+ imagePullPolicy: {{ .Values.serviceB.imagePullPolicy }}
+ resources:
+ limits:
+ cpu: {{ .Values.resources.defaultCpuLimit }}
+ memory: {{ .Values.resources.defaultMemoryLimit }}
+ ports:
+ - name: serviceb-8082
+ containerPort: 8082
+ volumeMounts:
+ - name: run-secrets-my-secrets
+ mountPath: /run/secrets/my-secrets
+ subPath: my-secrets
+ readOnly: true
+ volumes:
+ - name: run-secrets-my-secrets
+ secret:
+ secretName: my-secrets
+ items:
+ - key: my-secrets
+ path: my-secrets
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-expose.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-expose.yaml
new file mode 100755
index 00000000000..f413254dca0
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-expose.yaml
@@ -0,0 +1,19 @@
+#! serviceB-expose.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: serviceb
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ app.kubernetes.io/managed-by: Helm
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ ports:
+ - name: serviceb-8082
+ port: 8082
+ targetPort: serviceb-8082
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-service.yaml b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-service.yaml
new file mode 100755
index 00000000000..6860f3d2804
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/templates/serviceB-service.yaml
@@ -0,0 +1,21 @@
+#! serviceB-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: serviceb-published
+ namespace: {{ .Values.namespace }}
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: {{ .Values.service.type }}
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ ports:
+ - name: serviceb-8081
+ port: 8081
+ protocol: TCP
+ targetPort: serviceb-8082
diff --git a/pkg/e2e/fixtures/bridge/expected-helm/values.yaml b/pkg/e2e/fixtures/bridge/expected-helm/values.yaml
new file mode 100755
index 00000000000..78315927666
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-helm/values.yaml
@@ -0,0 +1,30 @@
+#! values.yaml
+# Project Name
+projectName: bridge
+# Namespace
+namespace: bridge
+# Default deployment settings
+deployment:
+ strategy: Recreate
+ defaultReplicas: 1
+# Default resource limits
+resources:
+ defaultCpuLimit: "100m"
+ defaultMemoryLimit: "512Mi"
+# Service settings
+service:
+ type: LoadBalancer
+# Storage settings
+storage:
+ defaultStorageClass: "hostpath"
+ defaultSize: "100Mi"
+ defaultAccessMode: "ReadWriteOnce"
+# Services variables
+serviceA:
+ image: alpine
+ imagePullPolicy: IfNotPresent
+serviceB:
+ image: alpine
+ imagePullPolicy: IfNotPresent
+
+# You can apply the same logic to loop on networks, volumes, secrets and configs...
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/0-bridge-namespace.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/0-bridge-namespace.yaml
new file mode 100755
index 00000000000..40e4b0e23f4
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/0-bridge-namespace.yaml
@@ -0,0 +1,8 @@
+#! 0-bridge-namespace.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: bridge
+ labels:
+ com.docker.compose.project: bridge
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/bridge-configs.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/bridge-configs.yaml
new file mode 100755
index 00000000000..822d2e076ef
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/bridge-configs.yaml
@@ -0,0 +1,12 @@
+#! bridge-configs.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: bridge
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+data:
+ my-config: |
+ My config file
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/kustomization.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/kustomization.yaml
new file mode 100755
index 00000000000..ff8428feae2
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/kustomization.yaml
@@ -0,0 +1,16 @@
+#! kustomization.yaml
+# Generated code, do not edit
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - 0-bridge-namespace.yaml
+ - bridge-configs.yaml
+ - my-secrets-secret.yaml
+ - private-network-network-policy.yaml
+ - public-network-network-policy.yaml
+ - serviceA-deployment.yaml
+ - serviceA-expose.yaml
+ - serviceA-service.yaml
+ - serviceB-deployment.yaml
+ - serviceB-expose.yaml
+ - serviceB-service.yaml
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/my-secrets-secret.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/my-secrets-secret.yaml
new file mode 100755
index 00000000000..559eba6a26e
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/my-secrets-secret.yaml
@@ -0,0 +1,13 @@
+#! my-secrets-secret.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Secret
+metadata:
+ name: my-secrets
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.secret: my-secrets
+data:
+ my-secrets: bm90LXNlY3JldA==
+type: Opaque
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/private-network-network-policy.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/private-network-network-policy.yaml
new file mode 100755
index 00000000000..3f59b22dd9d
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/private-network-network-policy.yaml
@@ -0,0 +1,24 @@
+#! private-network-network-policy.yaml
+# Generated code, do not edit
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: private-network-network-policy
+ namespace: bridge
+spec:
+ podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.private-network: "true"
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/public-network-network-policy.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/public-network-network-policy.yaml
new file mode 100755
index 00000000000..04913d4b9a7
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/public-network-network-policy.yaml
@@ -0,0 +1,24 @@
+#! public-network-network-policy.yaml
+# Generated code, do not edit
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: public-network-network-policy
+ namespace: bridge
+spec:
+ podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
+ policyTypes:
+ - Ingress
+ - Egress
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ com.docker.compose.network.public-network: "true"
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-deployment.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-deployment.yaml
new file mode 100755
index 00000000000..0779cf56268
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-deployment.yaml
@@ -0,0 +1,44 @@
+#! serviceA-deployment.yaml
+# Generated code, do not edit
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: servicea
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ com.docker.compose.network.private-network: "true"
+ spec:
+ containers:
+ - name: servicea
+ image: alpine
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: servicea-8080
+ containerPort: 8080
+ volumeMounts:
+ - name: etc-my-config1-txt
+ mountPath: /etc/my-config1.txt
+ subPath: my-config
+ readOnly: true
+ volumes:
+ - name: etc-my-config1-txt
+ configMap:
+ name: bridge
+ items:
+ - key: my-config
+ path: my-config
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-expose.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-expose.yaml
new file mode 100755
index 00000000000..d0bd013ecff
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-expose.yaml
@@ -0,0 +1,18 @@
+#! serviceA-expose.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: servicea
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ ports:
+ - name: servicea-8080
+ port: 8080
+ targetPort: servicea-8080
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-service.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-service.yaml
new file mode 100755
index 00000000000..628cf04189c
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceA-service.yaml
@@ -0,0 +1,23 @@
+# check if there is at least one published port
+
+#! serviceA-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: servicea-published
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceA
+ ports:
+ - name: servicea-80
+ port: 80
+ protocol: TCP
+ targetPort: servicea-8080
+
+# check if there is at least one published port
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-deployment.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-deployment.yaml
new file mode 100755
index 00000000000..191720c2014
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-deployment.yaml
@@ -0,0 +1,45 @@
+#! serviceB-deployment.yaml
+# Generated code, do not edit
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: serviceb
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ com.docker.compose.network.private-network: "true"
+ com.docker.compose.network.public-network: "true"
+ spec:
+ containers:
+ - name: serviceb
+ image: alpine
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: serviceb-8082
+ containerPort: 8082
+ volumeMounts:
+ - name: run-secrets-my-secrets
+ mountPath: /run/secrets/my-secrets
+ subPath: my-secrets
+ readOnly: true
+ volumes:
+ - name: run-secrets-my-secrets
+ secret:
+ secretName: my-secrets
+ items:
+ - key: my-secrets
+ path: my-secrets
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-expose.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-expose.yaml
new file mode 100755
index 00000000000..2025868991d
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-expose.yaml
@@ -0,0 +1,18 @@
+#! serviceB-expose.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: serviceb
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ ports:
+ - name: serviceb-8082
+ port: 8082
+ targetPort: serviceb-8082
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-service.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-service.yaml
new file mode 100755
index 00000000000..94104185871
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/base/serviceB-service.yaml
@@ -0,0 +1,19 @@
+#! serviceB-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: serviceb-published
+ namespace: bridge
+ labels:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+spec:
+ selector:
+ com.docker.compose.project: bridge
+ com.docker.compose.service: serviceB
+ ports:
+ - name: serviceb-8081
+ port: 8081
+ protocol: TCP
+ targetPort: serviceb-8082
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/kustomization.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/kustomization.yaml
new file mode 100755
index 00000000000..a192e45f0fe
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/kustomization.yaml
@@ -0,0 +1,9 @@
+#! kustomization.yaml
+# Generated code, do not edit
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../base
+patches:
+ - path: serviceA-service.yaml
+ - path: serviceB-service.yaml
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceA-service.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceA-service.yaml
new file mode 100755
index 00000000000..6453b5adba3
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceA-service.yaml
@@ -0,0 +1,13 @@
+# check if there is at least one published port
+
+#! serviceA-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: servicea-published
+ namespace: bridge
+spec:
+ type: LoadBalancer
+
+# check if there is at least one published port
diff --git a/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceB-service.yaml b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceB-service.yaml
new file mode 100755
index 00000000000..f21b674336b
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/expected-kubernetes/overlays/desktop/serviceB-service.yaml
@@ -0,0 +1,9 @@
+#! serviceB-service.yaml
+# Generated code, do not edit
+apiVersion: v1
+kind: Service
+metadata:
+ name: serviceb-published
+ namespace: bridge
+spec:
+ type: LoadBalancer
diff --git a/pkg/e2e/fixtures/bridge/my-config.txt b/pkg/e2e/fixtures/bridge/my-config.txt
new file mode 100644
index 00000000000..24d11e40bb8
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/my-config.txt
@@ -0,0 +1 @@
+My config file
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/bridge/not-so-secret.txt b/pkg/e2e/fixtures/bridge/not-so-secret.txt
new file mode 100644
index 00000000000..4e76a78aebb
--- /dev/null
+++ b/pkg/e2e/fixtures/bridge/not-so-secret.txt
@@ -0,0 +1 @@
+not-secret
\ No newline at end of file
diff --git a/scripts/validate/fileheader b/pkg/e2e/fixtures/build-dependencies/base.dockerfile
old mode 100755
new mode 100644
similarity index 61%
rename from scripts/validate/fileheader
rename to pkg/e2e/fixtures/build-dependencies/base.dockerfile
index 8b503df363f..9dce0b74f41
--- a/scripts/validate/fileheader
+++ b/pkg/e2e/fixtures/build-dependencies/base.dockerfile
@@ -1,6 +1,4 @@
-#!/usr/bin/env sh
-
-# Copyright Docker Compose CLI authors
+# Copyright 2020 Docker Compose CLI authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,14 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set -eu -o pipefail
-
-if ! command -v ltag; then
- >&2 echo "ERROR: ltag not found. Install with:"
- >&2 echo " go get -u github.com/kunalkushwaha/ltag"
- exit 1
-fi
+FROM alpine
-BASEPATH="${1-}"
+COPY hello.txt /hello.txt
-ltag -t "${BASEPATH}scripts/validate/template" -excludes "validate testdata resolvepath" --check -v
\ No newline at end of file
+CMD [ "/bin/true" ]
diff --git a/pkg/e2e/fixtures/build-dependencies/classic.yaml b/pkg/e2e/fixtures/build-dependencies/classic.yaml
new file mode 100644
index 00000000000..b0dbbaad0a1
--- /dev/null
+++ b/pkg/e2e/fixtures/build-dependencies/classic.yaml
@@ -0,0 +1,14 @@
+services:
+ base:
+ image: base
+ init: true
+ build:
+ context: .
+ dockerfile: base.dockerfile
+ service:
+ init: true
+ depends_on:
+ - base
+ build:
+ context: .
+ dockerfile: service.dockerfile
diff --git a/pkg/e2e/fixtures/build-dependencies/compose-depends_on.yaml b/pkg/e2e/fixtures/build-dependencies/compose-depends_on.yaml
new file mode 100644
index 00000000000..90b2beaef18
--- /dev/null
+++ b/pkg/e2e/fixtures/build-dependencies/compose-depends_on.yaml
@@ -0,0 +1,15 @@
+services:
+ test1:
+ pull_policy: build
+ build:
+ dockerfile_inline: FROM alpine
+ command:
+ - echo
+ - "test 1 success"
+ test2:
+ image: alpine
+ depends_on:
+ - test1
+ command:
+ - echo
+ - "test 2 success"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-dependencies/compose.yaml b/pkg/e2e/fixtures/build-dependencies/compose.yaml
new file mode 100644
index 00000000000..952a7199ef8
--- /dev/null
+++ b/pkg/e2e/fixtures/build-dependencies/compose.yaml
@@ -0,0 +1,13 @@
+services:
+ base:
+ init: true
+ build:
+ context: .
+ dockerfile: base.dockerfile
+ service:
+ init: true
+ build:
+ context: .
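+      # "service:base" makes the image built for the "base" service available as a named build context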
+ additional_contexts:
+ base: "service:base"
+ dockerfile: service.dockerfile
diff --git a/pkg/e2e/fixtures/build-dependencies/hello.txt b/pkg/e2e/fixtures/build-dependencies/hello.txt
new file mode 100644
index 00000000000..810e7ba64ac
--- /dev/null
+++ b/pkg/e2e/fixtures/build-dependencies/hello.txt
@@ -0,0 +1 @@
+this file was copied from base -> service
diff --git a/pkg/e2e/fixtures/build-dependencies/service.dockerfile b/pkg/e2e/fixtures/build-dependencies/service.dockerfile
new file mode 100644
index 00000000000..8c710b57321
--- /dev/null
+++ b/pkg/e2e/fixtures/build-dependencies/service.dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM base
+
+CMD [ "cat", "/hello.txt" ]
diff --git a/pkg/e2e/fixtures/build-test/dependencies/compose.yaml b/pkg/e2e/fixtures/build-test/dependencies/compose.yaml
new file mode 100644
index 00000000000..eb5de31f943
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/dependencies/compose.yaml
@@ -0,0 +1,26 @@
+services:
+ firstbuild:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ additional_contexts:
+ dep1: service:dep1
+ entrypoint: ["echo", "Hello from firstbuild"]
+ depends_on:
+ - dep1
+
+ secondbuild:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ additional_contexts:
+ dep1: service:dep1
+ entrypoint: ["echo", "Hello from secondbuild"]
+ depends_on:
+ - dep1
+
+ dep1:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from dep1"]
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/entitlements/Dockerfile b/pkg/e2e/fixtures/build-test/entitlements/Dockerfile
new file mode 100644
index 00000000000..a242eb52d4b
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/entitlements/Dockerfile
@@ -0,0 +1,19 @@
+# syntax = docker/dockerfile:experimental
+
+
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+RUN --security=insecure cat /proc/self/status | grep CapEff
diff --git a/pkg/e2e/fixtures/build-test/entitlements/compose.yaml b/pkg/e2e/fixtures/build-test/entitlements/compose.yaml
new file mode 100644
index 00000000000..403529e6477
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/entitlements/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ privileged-service:
+ build:
+ context: .
+ entitlements:
+ - security.insecure
+
diff --git a/pkg/e2e/fixtures/build-test/escaped/Dockerfile b/pkg/e2e/fixtures/build-test/escaped/Dockerfile
new file mode 100644
index 00000000000..dd507f4fffd
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/escaped/Dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+ARG foo
+RUN echo foo is $foo
diff --git a/pkg/e2e/fixtures/build-test/escaped/compose.yaml b/pkg/e2e/fixtures/build-test/escaped/compose.yaml
new file mode 100644
index 00000000000..2d0077b9e63
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/escaped/compose.yaml
@@ -0,0 +1,23 @@
+services:
+ foo:
+ build:
+ context: .
+ args:
+ foo: $${bar}
+
+ echo:
+ build:
+ dockerfile_inline: |
+ FROM bash
+ RUN <<'EOF'
+ echo $(seq 10)
+ EOF
+
+ arg:
+ build:
+ args:
+ BOOL: "true"
+ dockerfile_inline: |
+ FROM alpine:latest
+ ARG BOOL
+ RUN /bin/$${BOOL}
diff --git a/pkg/e2e/fixtures/build-test/long-output-line/Dockerfile b/pkg/e2e/fixtures/build-test/long-output-line/Dockerfile
new file mode 100644
index 00000000000..5227a491434
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/long-output-line/Dockerfile
@@ -0,0 +1,49 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM alpine
+# We generate warnings *on purpose* to bloat the JSON output of bake
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
+ARG AWS_SECRET_ACCESS_KEY=FAKE_TO_GENERATE_WARNING_OUTPUT
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/long-output-line/compose.yaml b/pkg/e2e/fixtures/build-test/long-output-line/compose.yaml
new file mode 100644
index 00000000000..d0e2a4fffdc
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/long-output-line/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ long-line:
+ build:
+ context: .
+ dockerfile: Dockerfile
diff --git a/scripts/validate/template/dockerfile.txt b/pkg/e2e/fixtures/build-test/minimal/Dockerfile
similarity index 96%
rename from scripts/validate/template/dockerfile.txt
rename to pkg/e2e/fixtures/build-test/minimal/Dockerfile
index 31e00001cf2..968515a22ed 100644
--- a/scripts/validate/template/dockerfile.txt
+++ b/pkg/e2e/fixtures/build-test/minimal/Dockerfile
@@ -11,3 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+FROM scratch
+COPY . .
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/minimal/compose.yaml b/pkg/e2e/fixtures/build-test/minimal/compose.yaml
new file mode 100644
index 00000000000..8362c60b900
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/minimal/compose.yaml
@@ -0,0 +1,3 @@
+services:
+ test:
+ build: .
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/nginx-build/static/index.html b/pkg/e2e/fixtures/build-test/nginx-build/static/index.html
index 63159b9e91b..914e3406cb9 100644
--- a/pkg/e2e/fixtures/build-test/nginx-build/static/index.html
+++ b/pkg/e2e/fixtures/build-test/nginx-build/static/index.html
@@ -2,7 +2,7 @@
- Docker Nginx
+ Static file 2
Hello from Nginx container
diff --git a/pkg/e2e/fixtures/build-test/nginx-build2/static2/index.html b/pkg/e2e/fixtures/build-test/nginx-build2/static2/index.html
index 63159b9e91b..914e3406cb9 100644
--- a/pkg/e2e/fixtures/build-test/nginx-build2/static2/index.html
+++ b/pkg/e2e/fixtures/build-test/nginx-build2/static2/index.html
@@ -2,7 +2,7 @@
- Docker Nginx
+ Static file 2
Hello from Nginx container
diff --git a/pkg/e2e/fixtures/build-test/platforms/Dockerfile b/pkg/e2e/fixtures/build-test/platforms/Dockerfile
new file mode 100644
index 00000000000..ef22c17f6a5
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/Dockerfile
@@ -0,0 +1,22 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=$BUILDPLATFORM golang:alpine AS build
+
+ARG TARGETPLATFORM
+ARG BUILDPLATFORM
+RUN echo "I am building for $TARGETPLATFORM, running on $BUILDPLATFORM" > /log
+
+FROM alpine
+COPY --from=build /log /log
diff --git a/pkg/e2e/fixtures/build-test/platforms/compose-multiple-platform-builds.yaml b/pkg/e2e/fixtures/build-test/platforms/compose-multiple-platform-builds.yaml
new file mode 100644
index 00000000000..aac3a3db90d
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/compose-multiple-platform-builds.yaml
@@ -0,0 +1,23 @@
+services:
+ serviceA:
+ image: build-test-platform-a:test
+ build:
+ context: ./contextServiceA
+ platforms:
+ - linux/amd64
+ - linux/arm64
+ serviceB:
+ image: build-test-platform-b:test
+ build:
+ context: ./contextServiceB
+ platforms:
+ - linux/amd64
+ - linux/arm64
+ serviceC:
+ image: build-test-platform-c:test
+ build:
+ context: ./contextServiceC
+ platforms:
+ - linux/amd64
+ - linux/arm64
+
diff --git a/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-and-no-build-platforms.yaml b/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-and-no-build-platforms.yaml
new file mode 100644
index 00000000000..3d0eafbfc24
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-and-no-build-platforms.yaml
@@ -0,0 +1,6 @@
+services:
+ platforms:
+ image: build-test-platform:test
+ platform: linux/386
+ build:
+ context: .
diff --git a/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-not-in-build-platforms.yaml b/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-not-in-build-platforms.yaml
new file mode 100644
index 00000000000..bed88fa51f3
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/compose-service-platform-not-in-build-platforms.yaml
@@ -0,0 +1,9 @@
+services:
+ platforms:
+ image: build-test-platform:test
+ platform: linux/riscv64
+ build:
+ context: .
+ platforms:
+ - linux/amd64
+ - linux/arm64
diff --git a/pkg/e2e/fixtures/build-test/platforms/compose-unsupported-platform.yml b/pkg/e2e/fixtures/build-test/platforms/compose-unsupported-platform.yml
new file mode 100644
index 00000000000..e3342829168
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/compose-unsupported-platform.yml
@@ -0,0 +1,8 @@
+services:
+ platforms:
+ image: build-test-platform:test
+ build:
+ context: .
+ platforms:
+ - unsupported/unsupported
+ - linux/amd64
diff --git a/pkg/e2e/fixtures/build-test/platforms/compose.yaml b/pkg/e2e/fixtures/build-test/platforms/compose.yaml
new file mode 100644
index 00000000000..73421f4793f
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ platforms:
+ image: build-test-platform:test
+ build:
+ context: .
+ platforms:
+ - linux/amd64
+ - linux/arm64
+
diff --git a/pkg/e2e/fixtures/build-test/platforms/contextServiceA/Dockerfile b/pkg/e2e/fixtures/build-test/platforms/contextServiceA/Dockerfile
new file mode 100644
index 00000000000..468b2b10dd6
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/contextServiceA/Dockerfile
@@ -0,0 +1,22 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=$BUILDPLATFORM golang:alpine AS build
+
+ARG TARGETPLATFORM
+ARG BUILDPLATFORM
+RUN echo "I'm Service A and I am building for $TARGETPLATFORM, running on $BUILDPLATFORM" > /log
+
+FROM alpine
+COPY --from=build /log /log
diff --git a/pkg/e2e/fixtures/build-test/platforms/contextServiceB/Dockerfile b/pkg/e2e/fixtures/build-test/platforms/contextServiceB/Dockerfile
new file mode 100644
index 00000000000..cfa2ae34ad7
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/contextServiceB/Dockerfile
@@ -0,0 +1,22 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=$BUILDPLATFORM golang:alpine AS build
+
+ARG TARGETPLATFORM
+ARG BUILDPLATFORM
+RUN echo "I'm Service B and I am building for $TARGETPLATFORM, running on $BUILDPLATFORM" > /log
+
+FROM alpine
+COPY --from=build /log /log
diff --git a/pkg/e2e/fixtures/build-test/platforms/contextServiceC/Dockerfile b/pkg/e2e/fixtures/build-test/platforms/contextServiceC/Dockerfile
new file mode 100644
index 00000000000..3216f618295
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/platforms/contextServiceC/Dockerfile
@@ -0,0 +1,22 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=$BUILDPLATFORM golang:alpine AS build
+
+ARG TARGETPLATFORM
+ARG BUILDPLATFORM
+RUN echo "I'm Service C and I am building for $TARGETPLATFORM, running on $BUILDPLATFORM" > /log
+
+FROM alpine
+COPY --from=build /log /log
diff --git a/pkg/e2e/fixtures/build-test/privileged/Dockerfile b/pkg/e2e/fixtures/build-test/privileged/Dockerfile
new file mode 100644
index 00000000000..a242eb52d4b
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/privileged/Dockerfile
@@ -0,0 +1,19 @@
+# syntax = docker/dockerfile:experimental
+
+
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+RUN --security=insecure cat /proc/self/status | grep CapEff
diff --git a/pkg/e2e/fixtures/build-test/privileged/compose.yaml b/pkg/e2e/fixtures/build-test/privileged/compose.yaml
new file mode 100644
index 00000000000..ead867cae83
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/privileged/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ privileged-service:
+ build:
+ context: .
+ privileged: true
diff --git a/pkg/e2e/fixtures/build-test/profiles/Dockerfile b/pkg/e2e/fixtures/build-test/profiles/Dockerfile
new file mode 100644
index 00000000000..94eb80e9c7d
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/profiles/Dockerfile
@@ -0,0 +1,19 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+FROM alpine
+RUN --mount=type=secret,id=test-secret ls -la /run/secrets/; cp /run/secrets/test-secret /tmp
+
+CMD ["cat", "/tmp/test-secret"]
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/profiles/compose.yaml b/pkg/e2e/fixtures/build-test/profiles/compose.yaml
new file mode 100644
index 00000000000..877babc0e5f
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/profiles/compose.yaml
@@ -0,0 +1,12 @@
+secrets:
+ test-secret:
+ file: test-secret.txt
+
+services:
+ secret-build-test:
+ profiles: ["test"]
+ build:
+ context: .
+ dockerfile: Dockerfile
+ secrets:
+ - test-secret
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/profiles/test-secret.txt b/pkg/e2e/fixtures/build-test/profiles/test-secret.txt
new file mode 100644
index 00000000000..78882121907
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/profiles/test-secret.txt
@@ -0,0 +1 @@
+SECRET
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/secrets/.env b/pkg/e2e/fixtures/build-test/secrets/.env
new file mode 100644
index 00000000000..9f8bc4f5d23
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/secrets/.env
@@ -0,0 +1 @@
+ANOTHER_SECRET=zot
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/secrets/Dockerfile b/pkg/e2e/fixtures/build-test/secrets/Dockerfile
new file mode 100644
index 00000000000..336673b050c
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/secrets/Dockerfile
@@ -0,0 +1,30 @@
+# syntax=docker/dockerfile:1
+
+
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+
+RUN echo "foo" > /tmp/expected
+RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret > /tmp/actual
+RUN diff /tmp/expected /tmp/actual
+
+RUN echo "bar" > /tmp/expected
+RUN --mount=type=secret,id=build_secret cat /run/secrets/build_secret > /tmp/actual
+RUN diff --ignore-all-space /tmp/expected /tmp/actual
+
+RUN echo "zot" > /tmp/expected
+RUN --mount=type=secret,id=dotenvsecret cat /run/secrets/dotenvsecret > /tmp/actual
+RUN diff --ignore-all-space /tmp/expected /tmp/actual
diff --git a/pkg/e2e/fixtures/build-test/secrets/compose.yml b/pkg/e2e/fixtures/build-test/secrets/compose.yml
new file mode 100644
index 00000000000..f041acf6101
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/secrets/compose.yml
@@ -0,0 +1,18 @@
+services:
+ ssh:
+ image: build-test-secret
+ build:
+ context: .
+ secrets:
+ - mysecret
+ - dotenvsecret
+ - source: envsecret
+ target: build_secret
+
+secrets:
+ mysecret:
+ file: ./secret.txt
+ envsecret:
+ environment: SOME_SECRET
+ dotenvsecret:
+ environment: ANOTHER_SECRET
diff --git a/pkg/e2e/fixtures/build-test/secrets/secret.txt b/pkg/e2e/fixtures/build-test/secrets/secret.txt
new file mode 100644
index 00000000000..257cc5642cb
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/secrets/secret.txt
@@ -0,0 +1 @@
+foo
diff --git a/pkg/e2e/fixtures/build-test/ssh/Dockerfile b/pkg/e2e/fixtures/build-test/ssh/Dockerfile
index 1a1831afa9b..d2fe8e5b895 100644
--- a/pkg/e2e/fixtures/build-test/ssh/Dockerfile
+++ b/pkg/e2e/fixtures/build-test/ssh/Dockerfile
@@ -1,4 +1,4 @@
-# syntax=docker/dockerfile:1.2
+# syntax=docker/dockerfile:1
# Copyright 2020 Docker Compose CLI authors
diff --git a/pkg/e2e/fixtures/build-test/ssh/compose.yaml b/pkg/e2e/fixtures/build-test/ssh/compose.yaml
index 27052a958e7..2fd56ab1494 100644
--- a/pkg/e2e/fixtures/build-test/ssh/compose.yaml
+++ b/pkg/e2e/fixtures/build-test/ssh/compose.yaml
@@ -4,4 +4,4 @@ services:
build:
context: .
ssh:
- - fake-ssh=./fixtures/build-test/ssh/fake_rsa
+ - fake-ssh=./fake_rsa
diff --git a/pkg/e2e/fixtures/build-test/sub-dependencies/compose.yaml b/pkg/e2e/fixtures/build-test/sub-dependencies/compose.yaml
new file mode 100644
index 00000000000..5662d689b3c
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/sub-dependencies/compose.yaml
@@ -0,0 +1,36 @@
+services:
+ main:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ additional_contexts:
+ dep1: service:dep1
+ dep2: service:dep2
+ entrypoint: ["echo", "Hello from main"]
+
+ dep1:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ additional_contexts:
+ subdep1: service:subdep1
+ subdep2: service:subdep2
+ entrypoint: ["echo", "Hello from dep1"]
+
+ dep2:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from dep2"]
+
+ subdep1:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from subdep1"]
+
+ subdep2:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from subdep2"]
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/subset/compose.yaml b/pkg/e2e/fixtures/build-test/subset/compose.yaml
new file mode 100644
index 00000000000..6bae0f1266b
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/subset/compose.yaml
@@ -0,0 +1,14 @@
+services:
+ main:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from main"]
+ depends_on:
+ - dep1
+
+ dep1:
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ entrypoint: ["echo", "Hello from dep1"]
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/build-test/tags/Dockerfile b/pkg/e2e/fixtures/build-test/tags/Dockerfile
new file mode 100644
index 00000000000..09b9df4ad83
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/tags/Dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM nginx:alpine
+
+RUN echo "SUCCESS"
diff --git a/pkg/e2e/fixtures/build-test/tags/compose.yaml b/pkg/e2e/fixtures/build-test/tags/compose.yaml
new file mode 100644
index 00000000000..de0178024f8
--- /dev/null
+++ b/pkg/e2e/fixtures/build-test/tags/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ nginx:
+ image: build-test-tags
+ build:
+ context: .
+ tags:
+ - docker.io/docker/build-test-tags:1.0.0
+ - other-image-name:v1.0.0
+
diff --git a/pkg/e2e/fixtures/cascade-stop-test/compose.yaml b/pkg/e2e/fixtures/cascade-stop-test/compose.yaml
deleted file mode 100644
index 1274fc53785..00000000000
--- a/pkg/e2e/fixtures/cascade-stop-test/compose.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-services:
- should_fail:
- image: alpine
- command: ls /does_not_exist
- sleep: # will be killed
- image: alpine
- command: ping localhost
diff --git a/pkg/e2e/fixtures/cascade/compose.yaml b/pkg/e2e/fixtures/cascade/compose.yaml
new file mode 100644
index 00000000000..fe79adb58a3
--- /dev/null
+++ b/pkg/e2e/fixtures/cascade/compose.yaml
@@ -0,0 +1,19 @@
+services:
+ running:
+ image: alpine
+ command: sleep infinity
+ init: true
+
+ exit:
+ image: alpine
+ command: /bin/true
+ depends_on:
+ running:
+ condition: service_started
+
+ fail:
+ image: alpine
+ command: sh -c "return 111"
+ depends_on:
+ exit:
+ condition: service_completed_successfully
diff --git a/pkg/e2e/fixtures/commit/compose.yaml b/pkg/e2e/fixtures/commit/compose.yaml
new file mode 100644
index 00000000000..28e4b15bd68
--- /dev/null
+++ b/pkg/e2e/fixtures/commit/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ service:
+ image: alpine
+ command: sleep infinity
+ service-with-replicas:
+ image: alpine
+ command: sleep infinity
+ deploy:
+ replicas: 3
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/compose-pull/duplicate-images/compose.yaml b/pkg/e2e/fixtures/compose-pull/duplicate-images/compose.yaml
new file mode 100644
index 00000000000..4a0d4c7b905
--- /dev/null
+++ b/pkg/e2e/fixtures/compose-pull/duplicate-images/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ simple:
+ image: alpine:3.13
+ command: top
+ another:
+ image: alpine:3.13
+ command: top
diff --git a/pkg/e2e/fixtures/compose-pull/image-present-locally/compose.yaml b/pkg/e2e/fixtures/compose-pull/image-present-locally/compose.yaml
new file mode 100644
index 00000000000..a23cf58572b
--- /dev/null
+++ b/pkg/e2e/fixtures/compose-pull/image-present-locally/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ simple:
+ image: alpine:3.13.12
+ pull_policy: missing
+ command: top
+ latest:
+ image: alpine:latest
+ pull_policy: missing
+ command: top
diff --git a/pkg/e2e/fixtures/compose-pull/no-image-name-given/compose.yaml b/pkg/e2e/fixtures/compose-pull/no-image-name-given/compose.yaml
new file mode 100644
index 00000000000..69494e7a1c3
--- /dev/null
+++ b/pkg/e2e/fixtures/compose-pull/no-image-name-given/compose.yaml
@@ -0,0 +1,3 @@
+services:
+ no-image-service:
+ build: .
diff --git a/pkg/e2e/fixtures/compose-pull/simple/compose.yaml b/pkg/e2e/fixtures/compose-pull/simple/compose.yaml
new file mode 100644
index 00000000000..2a5fd32a7e6
--- /dev/null
+++ b/pkg/e2e/fixtures/compose-pull/simple/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ simple:
+ image: alpine:3.14
+ command: top
+ another:
+ image: alpine:3.15
+ command: top
diff --git a/scripts/validate/template/makefile.txt b/pkg/e2e/fixtures/compose-pull/unknown-image/Dockerfile
similarity index 96%
rename from scripts/validate/template/makefile.txt
rename to pkg/e2e/fixtures/compose-pull/unknown-image/Dockerfile
index 31e00001cf2..fd3bd1dc704 100644
--- a/scripts/validate/template/makefile.txt
+++ b/pkg/e2e/fixtures/compose-pull/unknown-image/Dockerfile
@@ -11,3 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+FROM alpine:3.15
+
diff --git a/pkg/e2e/fixtures/compose-pull/unknown-image/compose.yaml b/pkg/e2e/fixtures/compose-pull/unknown-image/compose.yaml
new file mode 100644
index 00000000000..de40d09c49a
--- /dev/null
+++ b/pkg/e2e/fixtures/compose-pull/unknown-image/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ fail:
+ image: does_not_exists
+ can_build:
+ image: doesn_t_exists_either
+ build: .
+ valid:
+ image: alpine:3.15
+
diff --git a/pkg/e2e/fixtures/config/compose.yaml b/pkg/e2e/fixtures/config/compose.yaml
new file mode 100644
index 00000000000..634f521b23b
--- /dev/null
+++ b/pkg/e2e/fixtures/config/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ test:
+ image: test
+ ports:
+ - ${PORT:-8080}:80
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/configs/compose.yaml b/pkg/e2e/fixtures/configs/compose.yaml
new file mode 100644
index 00000000000..34d4827dda5
--- /dev/null
+++ b/pkg/e2e/fixtures/configs/compose.yaml
@@ -0,0 +1,33 @@
+services:
+ from_env:
+ image: alpine
+ configs:
+ - source: from_env
+ command: cat /from_env
+
+ from_file:
+ image: alpine
+ configs:
+ - source: from_file
+ command: cat /from_file
+
+ inlined:
+ image: alpine
+ configs:
+ - source: inlined
+ command: cat /inlined
+
+ target:
+ image: alpine
+ configs:
+ - source: inlined
+ target: /target
+ command: cat /target
+
+configs:
+ from_env:
+ environment: CONFIG
+ from_file:
+ file: config.txt
+ inlined:
+ content: This is my $CONFIG
diff --git a/pkg/e2e/fixtures/configs/config.txt b/pkg/e2e/fixtures/configs/config.txt
new file mode 100644
index 00000000000..58b9a4061dc
--- /dev/null
+++ b/pkg/e2e/fixtures/configs/config.txt
@@ -0,0 +1 @@
+This is my config file
diff --git a/pkg/e2e/fixtures/container_name/compose.yaml b/pkg/e2e/fixtures/container_name/compose.yaml
new file mode 100644
index 00000000000..c967b885d8e
--- /dev/null
+++ b/pkg/e2e/fixtures/container_name/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ test:
+ image: alpine
+ container_name: test
+ command: /bin/true
+
+ another_test:
+ image: alpine
+ container_name: test
+ command: /bin/true
diff --git a/pkg/e2e/fixtures/dependencies/Dockerfile b/pkg/e2e/fixtures/dependencies/Dockerfile
new file mode 100644
index 00000000000..fe5992a8df8
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/Dockerfile
@@ -0,0 +1,16 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM busybox:1.35.0
+RUN echo "hello"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/dependencies/compose.yaml b/pkg/e2e/fixtures/dependencies/compose.yaml
index 82d3e4e8445..099c5f18a73 100644
--- a/pkg/e2e/fixtures/dependencies/compose.yaml
+++ b/pkg/e2e/fixtures/dependencies/compose.yaml
@@ -1,8 +1,10 @@
services:
foo:
image: nginx:alpine
+ command: "${COMMAND}"
depends_on:
- bar
bar:
image: nginx:alpine
+ scale: 2
diff --git a/pkg/e2e/fixtures/dependencies/dependency-exit.yaml b/pkg/e2e/fixtures/dependencies/dependency-exit.yaml
new file mode 100644
index 00000000000..7ba02ba7948
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/dependency-exit.yaml
@@ -0,0 +1,10 @@
+services:
+ web:
+ image: nginx:alpine
+ depends_on:
+ db:
+ condition: service_healthy
+ db:
+ image: alpine
+ command: sh -c "exit 1"
+
diff --git a/pkg/e2e/fixtures/dependencies/deps-completed-successfully.yaml b/pkg/e2e/fixtures/dependencies/deps-completed-successfully.yaml
new file mode 100644
index 00000000000..5e65cb7a0a5
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/deps-completed-successfully.yaml
@@ -0,0 +1,11 @@
+services:
+ oneshot:
+ image: alpine
+ command: echo 'hello world'
+ longrunning:
+ image: alpine
+ init: true
+ depends_on:
+ oneshot:
+ condition: service_completed_successfully
+ command: sleep infinity
diff --git a/pkg/e2e/fixtures/dependencies/deps-not-required.yaml b/pkg/e2e/fixtures/dependencies/deps-not-required.yaml
new file mode 100644
index 00000000000..44286846bd0
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/deps-not-required.yaml
@@ -0,0 +1,11 @@
+services:
+ foo:
+ image: bash
+ command: echo "foo"
+ depends_on:
+ bar:
+ required: false
+ condition: service_healthy
+ bar:
+ image: nginx:alpine
+ profiles: [not-required]
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/dependencies/recreate-no-deps.yaml b/pkg/e2e/fixtures/dependencies/recreate-no-deps.yaml
new file mode 100644
index 00000000000..0b44c273dfb
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/recreate-no-deps.yaml
@@ -0,0 +1,17 @@
+version: '3.8'
+services:
+ my-service:
+ image: alpine
+ command: tail -f /dev/null
+ init: true
+ depends_on:
+ nginx: {condition: service_healthy}
+
+ nginx:
+ image: nginx:alpine
+ stop_signal: SIGTERM
+ healthcheck:
+ test: "echo | nc -w 5 localhost:80"
+ interval: 2s
+ timeout: 1s
+ retries: 10
diff --git a/pkg/e2e/fixtures/dependencies/service-image-depends-on.yaml b/pkg/e2e/fixtures/dependencies/service-image-depends-on.yaml
new file mode 100644
index 00000000000..3139978361e
--- /dev/null
+++ b/pkg/e2e/fixtures/dependencies/service-image-depends-on.yaml
@@ -0,0 +1,9 @@
+services:
+ foo:
+ image: built-image-dependency
+ build:
+ context: .
+ bar:
+ image: built-image-dependency
+ depends_on:
+ - foo
diff --git a/pkg/e2e/fixtures/dotenv/.env b/pkg/e2e/fixtures/dotenv/.env
new file mode 100644
index 00000000000..1230f22dd41
--- /dev/null
+++ b/pkg/e2e/fixtures/dotenv/.env
@@ -0,0 +1,3 @@
+COMPOSE_FILE="${COMPOSE_FILE:-development/compose.yaml}"
+IMAGE_NAME=test
+COMPOSE_PROFILES=test
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/dotenv/.env.raw b/pkg/e2e/fixtures/dotenv/.env.raw
new file mode 100644
index 00000000000..306900800fc
--- /dev/null
+++ b/pkg/e2e/fixtures/dotenv/.env.raw
@@ -0,0 +1 @@
+TEST_VAR='{"key": "value"}'
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/dotenv/development/.env b/pkg/e2e/fixtures/dotenv/development/.env
new file mode 100644
index 00000000000..93690287799
--- /dev/null
+++ b/pkg/e2e/fixtures/dotenv/development/.env
@@ -0,0 +1,2 @@
+IMAGE_NAME="${IMAGE_NAME:-backend}"
+IMAGE_TAG="${IMAGE_TAG:-latest}"
diff --git a/pkg/e2e/fixtures/dotenv/development/compose.yaml b/pkg/e2e/fixtures/dotenv/development/compose.yaml
new file mode 100644
index 00000000000..4731d635b60
--- /dev/null
+++ b/pkg/e2e/fixtures/dotenv/development/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ backend:
+ image: $IMAGE_NAME:$IMAGE_TAG
+ test:
+ profiles:
+ - test
+ image: enabled:profile
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/dotenv/raw.yaml b/pkg/e2e/fixtures/dotenv/raw.yaml
new file mode 100644
index 00000000000..a65664273d3
--- /dev/null
+++ b/pkg/e2e/fixtures/dotenv/raw.yaml
@@ -0,0 +1,7 @@
+services:
+ test:
+ image: alpine
+ command: sh -c "echo $$TEST_VAR"
+ env_file:
+ - path: .env.raw
+ format: raw # parse without interpolation
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/env-secret/child/compose.yaml b/pkg/e2e/fixtures/env-secret/child/compose.yaml
new file mode 100644
index 00000000000..6e4ab8213cf
--- /dev/null
+++ b/pkg/e2e/fixtures/env-secret/child/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ included:
+ image: alpine
+ secrets:
+ - my-secret
+ command: cat /run/secrets/my-secret
+
+secrets:
+ my-secret:
+ environment: 'MY_SECRET'
diff --git a/pkg/e2e/fixtures/env-secret/compose.yaml b/pkg/e2e/fixtures/env-secret/compose.yaml
new file mode 100644
index 00000000000..51052d36d21
--- /dev/null
+++ b/pkg/e2e/fixtures/env-secret/compose.yaml
@@ -0,0 +1,20 @@
+include:
+ - path: child/compose.yaml
+ env_file:
+ - secret.env
+
+services:
+ foo:
+ image: alpine
+ secrets:
+ - source: secret
+ target: bar
+ uid: "1005"
+ gid: "1005"
+ mode: 0440
+ command: cat /run/secrets/bar
+
+secrets:
+ secret:
+ environment: SECRET
+
diff --git a/pkg/e2e/fixtures/env-secret/secret.env b/pkg/e2e/fixtures/env-secret/secret.env
new file mode 100644
index 00000000000..a195fd539b0
--- /dev/null
+++ b/pkg/e2e/fixtures/env-secret/secret.env
@@ -0,0 +1 @@
+MY_SECRET='this-is-secret'
diff --git a/pkg/e2e/fixtures/env_file/compose.yaml b/pkg/e2e/fixtures/env_file/compose.yaml
new file mode 100644
index 00000000000..9983f573841
--- /dev/null
+++ b/pkg/e2e/fixtures/env_file/compose.yaml
@@ -0,0 +1,14 @@
+services:
+ serviceA:
+ image: nginx:latest
+
+ serviceB:
+ image: nginx:latest
+ env_file:
+ - /doesnotexist/.env
+
+ serviceC:
+ profiles: ["test"]
+ image: alpine
+ env_file: test.env
+
diff --git a/pkg/e2e/fixtures/env_file/test.env b/pkg/e2e/fixtures/env_file/test.env
new file mode 100644
index 00000000000..15c36f50ef7
--- /dev/null
+++ b/pkg/e2e/fixtures/env_file/test.env
@@ -0,0 +1 @@
+FOO=BAR
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/environment/empty-variable/Dockerfile b/pkg/e2e/fixtures/environment/empty-variable/Dockerfile
new file mode 100644
index 00000000000..a7dac49e0fb
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/empty-variable/Dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+ENV EMPTY=not_empty
+CMD ["sh", "-c", "echo \"=$EMPTY=\""]
diff --git a/pkg/e2e/fixtures/environment/empty-variable/compose.yaml b/pkg/e2e/fixtures/environment/empty-variable/compose.yaml
new file mode 100644
index 00000000000..6ac057af32e
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/empty-variable/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ empty-variable:
+ build:
+ context: .
+ image: empty-variable
+ environment:
+ - EMPTY # expect to propagate value from user's env OR unset in container
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/environment/env-file-comments/.env b/pkg/e2e/fixtures/environment/env-file-comments/.env
new file mode 100644
index 00000000000..068e52bee49
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-file-comments/.env
@@ -0,0 +1,2 @@
+COMMENT=1234#5
+NO_COMMENT="1234#5"
diff --git a/pkg/e2e/fixtures/environment/env-file-comments/Dockerfile b/pkg/e2e/fixtures/environment/env-file-comments/Dockerfile
new file mode 100644
index 00000000000..6c6972d6a64
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-file-comments/Dockerfile
@@ -0,0 +1,18 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+ENV COMMENT=Dockerfile
+ENV NO_COMMENT=Dockerfile
+CMD ["sh", "-c", "printenv", "|", "grep", "COMMENT"]
diff --git a/pkg/e2e/fixtures/environment/env-file-comments/compose.yaml b/pkg/e2e/fixtures/environment/env-file-comments/compose.yaml
new file mode 100644
index 00000000000..718968660a6
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-file-comments/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ env-file-comments:
+ build:
+ context: .
+ image: env-file-comments
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/environment/env-interpolation-default-value/.env b/pkg/e2e/fixtures/environment/env-interpolation-default-value/.env
new file mode 100644
index 00000000000..79a91230afa
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-interpolation-default-value/.env
@@ -0,0 +1 @@
+IMAGE=default_env:${WHEREAMI:-EnvFileDefaultValue}
diff --git a/pkg/e2e/fixtures/environment/env-interpolation-default-value/compose.yaml b/pkg/e2e/fixtures/environment/env-interpolation-default-value/compose.yaml
new file mode 100644
index 00000000000..4d02fcdaa96
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-interpolation-default-value/compose.yaml
@@ -0,0 +1,6 @@
+services:
+ env-interpolation:
+ image: bash
+ environment:
+ IMAGE: ${IMAGE}
+ command: echo "$IMAGE"
diff --git a/pkg/e2e/fixtures/environment/env-interpolation/.env b/pkg/e2e/fixtures/environment/env-interpolation/.env
new file mode 100644
index 00000000000..b3a1dfee365
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-interpolation/.env
@@ -0,0 +1,2 @@
+WHEREAMI=EnvFile
+IMAGE=default_env:${WHEREAMI}
diff --git a/pkg/e2e/fixtures/environment/env-interpolation/compose.yaml b/pkg/e2e/fixtures/environment/env-interpolation/compose.yaml
new file mode 100644
index 00000000000..7a4b3865f64
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-interpolation/compose.yaml
@@ -0,0 +1,6 @@
+services:
+ env-interpolation:
+ image: bash
+ environment:
+ IMAGE: ${IMAGE}
+ command: echo "$IMAGE"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/environment/env-priority/.env b/pkg/e2e/fixtures/environment/env-priority/.env
new file mode 100644
index 00000000000..c93127ac675
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/.env
@@ -0,0 +1 @@
+WHEREAMI=Env File
diff --git a/pkg/e2e/fixtures/environment/env-priority/.env.empty b/pkg/e2e/fixtures/environment/env-priority/.env.empty
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/pkg/e2e/fixtures/environment/env-priority/.env.override b/pkg/e2e/fixtures/environment/env-priority/.env.override
new file mode 100644
index 00000000000..398fa51b321
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/.env.override
@@ -0,0 +1 @@
+WHEREAMI=override
diff --git a/pkg/e2e/fixtures/environment/env-priority/.env.override.with.default b/pkg/e2e/fixtures/environment/env-priority/.env.override.with.default
new file mode 100644
index 00000000000..35258b20ffb
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/.env.override.with.default
@@ -0,0 +1 @@
+WHEREAMI=${WHEREAMI:-EnvFileDefaultValue}
diff --git a/pkg/e2e/fixtures/environment/env-priority/Dockerfile b/pkg/e2e/fixtures/environment/env-priority/Dockerfile
new file mode 100644
index 00000000000..0901119f7df
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/Dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM alpine
+ENV WHEREAMI=Dockerfile
+CMD ["printenv", "WHEREAMI"]
diff --git a/pkg/e2e/fixtures/environment/env-priority/compose-with-env-file.yaml b/pkg/e2e/fixtures/environment/env-priority/compose-with-env-file.yaml
new file mode 100644
index 00000000000..4659830f2e6
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/compose-with-env-file.yaml
@@ -0,0 +1,7 @@
+services:
+ env-compose-priority:
+ image: env-compose-priority
+ build:
+ context: .
+ env_file:
+ - .env.override
diff --git a/pkg/e2e/fixtures/environment/env-priority/compose-with-env.yaml b/pkg/e2e/fixtures/environment/env-priority/compose-with-env.yaml
new file mode 100644
index 00000000000..d8cdc140c33
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/compose-with-env.yaml
@@ -0,0 +1,7 @@
+services:
+ env-compose-priority:
+ image: env-compose-priority
+ build:
+ context: .
+ environment:
+ WHEREAMI: "Compose File"
diff --git a/pkg/e2e/fixtures/environment/env-priority/compose.yaml b/pkg/e2e/fixtures/environment/env-priority/compose.yaml
new file mode 100644
index 00000000000..9d107d857d5
--- /dev/null
+++ b/pkg/e2e/fixtures/environment/env-priority/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ env-compose-priority:
+ image: env-compose-priority
+ build:
+ context: .
diff --git a/pkg/e2e/fixtures/exec/compose.yaml b/pkg/e2e/fixtures/exec/compose.yaml
new file mode 100644
index 00000000000..8920173bf45
--- /dev/null
+++ b/pkg/e2e/fixtures/exec/compose.yaml
@@ -0,0 +1,4 @@
+services:
+ test:
+ image: alpine
+ command: tail -f /dev/null
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/export/compose.yaml b/pkg/e2e/fixtures/export/compose.yaml
new file mode 100644
index 00000000000..28e4b15bd68
--- /dev/null
+++ b/pkg/e2e/fixtures/export/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ service:
+ image: alpine
+ command: sleep infinity
+ service-with-replicas:
+ image: alpine
+ command: sleep infinity
+ deploy:
+ replicas: 3
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/external/compose.yaml b/pkg/e2e/fixtures/external/compose.yaml
new file mode 100644
index 00000000000..29b8b74a179
--- /dev/null
+++ b/pkg/e2e/fixtures/external/compose.yaml
@@ -0,0 +1,14 @@
+services:
+ test:
+ image: nginx:alpine
+
+ other:
+ image: nginx:alpine
+ networks:
+ test_network:
+ ipv4_address: 8.8.8.8
+
+networks:
+ test_network:
+ external: true
+ name: foo_bar
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/hooks/compose.yaml b/pkg/e2e/fixtures/hooks/compose.yaml
new file mode 100644
index 00000000000..b45a65df063
--- /dev/null
+++ b/pkg/e2e/fixtures/hooks/compose.yaml
@@ -0,0 +1,14 @@
+services:
+ sample:
+ image: nginx
+ volumes:
+ - data:/data
+ pre_stop:
+ - command: sh -c 'echo "In the pre-stop" >> /data/log.txt'
+ test:
+ image: nginx
+ post_start:
+ - command: sh -c 'echo env'
+volumes:
+ data:
+ name: sample-data
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/hooks/poststart/compose-error.yaml b/pkg/e2e/fixtures/hooks/poststart/compose-error.yaml
new file mode 100644
index 00000000000..2d5bf5c44da
--- /dev/null
+++ b/pkg/e2e/fixtures/hooks/poststart/compose-error.yaml
@@ -0,0 +1,6 @@
+services:
+
+ test:
+ image: nginx
+ post_start:
+ - command: sh -c 'command in error'
diff --git a/pkg/e2e/fixtures/hooks/poststart/compose-success.yaml b/pkg/e2e/fixtures/hooks/poststart/compose-success.yaml
new file mode 100644
index 00000000000..7277945883c
--- /dev/null
+++ b/pkg/e2e/fixtures/hooks/poststart/compose-success.yaml
@@ -0,0 +1,5 @@
+services:
+ test:
+ image: nginx
+ post_start:
+ - command: sh -c 'echo env'
diff --git a/pkg/e2e/fixtures/hooks/prestop/compose-error.yaml b/pkg/e2e/fixtures/hooks/prestop/compose-error.yaml
new file mode 100644
index 00000000000..1beb6c7e519
--- /dev/null
+++ b/pkg/e2e/fixtures/hooks/prestop/compose-error.yaml
@@ -0,0 +1,10 @@
+services:
+ sample:
+ image: nginx
+ volumes:
+ - data:/data
+ pre_stop:
+ - command: sh -c 'command in error'
+volumes:
+ data:
+ name: sample-data
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/hooks/prestop/compose-success.yaml b/pkg/e2e/fixtures/hooks/prestop/compose-success.yaml
new file mode 100644
index 00000000000..0d80582117c
--- /dev/null
+++ b/pkg/e2e/fixtures/hooks/prestop/compose-success.yaml
@@ -0,0 +1,10 @@
+services:
+ sample:
+ image: nginx
+ volumes:
+ - data:/data
+ pre_stop:
+ - command: sh -c 'echo "In the pre-stop" >> /data/log.txt'
+volumes:
+ data:
+ name: sample-data
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/ipam/compose.yaml b/pkg/e2e/fixtures/ipam/compose.yaml
index 4cc479ed4cf..632690d2f17 100644
--- a/pkg/e2e/fixtures/ipam/compose.yaml
+++ b/pkg/e2e/fixtures/ipam/compose.yaml
@@ -1,6 +1,7 @@
services:
foo:
image: alpine
+ init: true
entrypoint: ["sleep", "600"]
networks:
default:
@@ -9,4 +10,4 @@ networks:
default:
ipam:
config:
- - subnet: 10.1.0.0/16
\ No newline at end of file
+ - subnet: 10.1.0.0/16
diff --git a/pkg/e2e/fixtures/links/compose.yaml b/pkg/e2e/fixtures/links/compose.yaml
new file mode 100644
index 00000000000..8c182c4d24d
--- /dev/null
+++ b/pkg/e2e/fixtures/links/compose.yaml
@@ -0,0 +1,8 @@
+services:
+ foo:
+ image: nginx:alpine
+ links:
+ - bar
+
+ bar:
+ image: nginx:alpine
diff --git a/pkg/e2e/fixtures/logging-driver/compose.yaml b/pkg/e2e/fixtures/logging-driver/compose.yaml
new file mode 100644
index 00000000000..37b3e8b3e4d
--- /dev/null
+++ b/pkg/e2e/fixtures/logging-driver/compose.yaml
@@ -0,0 +1,19 @@
+services:
+ fluentbit:
+ image: fluent/fluent-bit:3.1.7-debug
+ ports:
+ - "24224:24224"
+ - "24224:24224/udp"
+ environment:
+ FOO: ${BAR}
+
+ app:
+ image: nginx
+ depends_on:
+ fluentbit:
+ condition: service_started
+ restart: true
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: ${HOST:-127.0.0.1}:24224
diff --git a/pkg/e2e/fixtures/logs-test/cat.yaml b/pkg/e2e/fixtures/logs-test/cat.yaml
new file mode 100644
index 00000000000..76bd5a9ab64
--- /dev/null
+++ b/pkg/e2e/fixtures/logs-test/cat.yaml
@@ -0,0 +1,6 @@
+services:
+ test:
+ image: alpine
+ command: cat /text_file.txt
+ volumes:
+ - ${FILE}:/text_file.txt
diff --git a/pkg/e2e/fixtures/logs-test/compose.yaml b/pkg/e2e/fixtures/logs-test/compose.yaml
index 2b2c4c3bb73..5f9ba0e9dee 100644
--- a/pkg/e2e/fixtures/logs-test/compose.yaml
+++ b/pkg/e2e/fixtures/logs-test/compose.yaml
@@ -1,7 +1,10 @@
services:
ping:
image: alpine
- command: ping localhost -c 1
+ init: true
+ command: ping localhost -c ${REPEAT:-1}
hello:
image: alpine
command: echo hello
+ deploy:
+ replicas: 2
diff --git a/pkg/e2e/fixtures/logs-test/restart.yaml b/pkg/e2e/fixtures/logs-test/restart.yaml
new file mode 100644
index 00000000000..35a2abb3def
--- /dev/null
+++ b/pkg/e2e/fixtures/logs-test/restart.yaml
@@ -0,0 +1,5 @@
+services:
+ ping:
+ image: alpine
+ command: "sh -c 'ping -c 2 localhost && exit 1'"
+ restart: "on-failure:2"
diff --git a/pkg/e2e/fixtures/model/compose.yaml b/pkg/e2e/fixtures/model/compose.yaml
new file mode 100644
index 00000000000..f9eb8f6ee44
--- /dev/null
+++ b/pkg/e2e/fixtures/model/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ test:
+ image: alpine/curl
+ models:
+ - foo
+
+models:
+ foo:
+ model: ai/smollm2
diff --git a/pkg/e2e/fixtures/nested/.env b/pkg/e2e/fixtures/nested/.env
new file mode 100644
index 00000000000..df2676a3740
--- /dev/null
+++ b/pkg/e2e/fixtures/nested/.env
@@ -0,0 +1,2 @@
+ROOT=root
+WIN=root
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/nested/compose.yaml b/pkg/e2e/fixtures/nested/compose.yaml
new file mode 100644
index 00000000000..d449f943f1c
--- /dev/null
+++ b/pkg/e2e/fixtures/nested/compose.yaml
@@ -0,0 +1,4 @@
+services:
+ echo:
+ image: alpine
+ command: echo $ROOT $SUB win=$WIN
diff --git a/pkg/e2e/fixtures/nested/sub/.env b/pkg/e2e/fixtures/nested/sub/.env
new file mode 100644
index 00000000000..b930a819159
--- /dev/null
+++ b/pkg/e2e/fixtures/nested/sub/.env
@@ -0,0 +1,2 @@
+SUB=sub
+WIN=sub
diff --git a/pkg/e2e/fixtures/network-interface-name/compose.yaml b/pkg/e2e/fixtures/network-interface-name/compose.yaml
new file mode 100644
index 00000000000..701830a48a5
--- /dev/null
+++ b/pkg/e2e/fixtures/network-interface-name/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ test:
+ image: alpine
+ command: ip link show
+ networks:
+ default:
+ interface_name: foobar
diff --git a/pkg/e2e/fixtures/network-links/compose.yaml b/pkg/e2e/fixtures/network-links/compose.yaml
new file mode 100644
index 00000000000..c09a33fcdaa
--- /dev/null
+++ b/pkg/e2e/fixtures/network-links/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ container1:
+ image: nginx
+ network_mode: bridge
+ container2:
+ image: nginx
+ network_mode: bridge
+ links:
+ - container1
diff --git a/pkg/e2e/fixtures/network-recreate/compose.yaml b/pkg/e2e/fixtures/network-recreate/compose.yaml
new file mode 100644
index 00000000000..06a0a3e634a
--- /dev/null
+++ b/pkg/e2e/fixtures/network-recreate/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ web:
+ image: nginx
+ networks:
+ - test
+
+networks:
+ test:
+ labels:
+ - foo=${FOO:-foo}
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/network-test/compose.subnet.yaml b/pkg/e2e/fixtures/network-test/compose.subnet.yaml
new file mode 100644
index 00000000000..46a358f6205
--- /dev/null
+++ b/pkg/e2e/fixtures/network-test/compose.subnet.yaml
@@ -0,0 +1,12 @@
+services:
+ test:
+ image: nginx:alpine
+ networks:
+ - test
+
+networks:
+ test:
+ ipam:
+ config:
+ - subnet: ${SUBNET-172.99.0.0/16}
+
diff --git a/pkg/e2e/fixtures/network-test/compose.yaml b/pkg/e2e/fixtures/network-test/compose.yaml
index 0045ec7b5bd..608007ec34e 100644
--- a/pkg/e2e/fixtures/network-test/compose.yaml
+++ b/pkg/e2e/fixtures/network-test/compose.yaml
@@ -6,10 +6,14 @@ services:
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
db:
image: gtardif/sentences-db
+ init: true
networks:
- dbnet
+ - closesnetworkname1
+ - closesnetworkname2
words:
image: gtardif/sentences-api
+ init: true
ports:
- "8080:8080"
networks:
@@ -17,6 +21,7 @@ services:
- servicenet
web:
image: gtardif/sentences-web
+ init: true
ports:
- "80:80"
labels:
@@ -28,3 +33,7 @@ networks:
dbnet:
servicenet:
name: microservices
+ closesnetworkname1:
+ name: closenamenet
+ closesnetworkname2:
+ name: closenamenet-2
diff --git a/pkg/e2e/fixtures/network-test/mac_address.yaml b/pkg/e2e/fixtures/network-test/mac_address.yaml
new file mode 100644
index 00000000000..60e3861a2d8
--- /dev/null
+++ b/pkg/e2e/fixtures/network-test/mac_address.yaml
@@ -0,0 +1,4 @@
+services:
+ test:
+ image: nginx:alpine
+ mac_address: 00:e0:84:35:d0:e8
diff --git a/pkg/e2e/fixtures/no-deps/network-mode.yaml b/pkg/e2e/fixtures/no-deps/network-mode.yaml
new file mode 100644
index 00000000000..aab03f5b12d
--- /dev/null
+++ b/pkg/e2e/fixtures/no-deps/network-mode.yaml
@@ -0,0 +1,7 @@
+services:
+ app:
+ image: nginx:alpine
+ network_mode: service:db
+
+ db:
+ image: nginx:alpine
diff --git a/pkg/e2e/fixtures/no-deps/volume-from.yaml b/pkg/e2e/fixtures/no-deps/volume-from.yaml
new file mode 100644
index 00000000000..96b35761e0f
--- /dev/null
+++ b/pkg/e2e/fixtures/no-deps/volume-from.yaml
@@ -0,0 +1,10 @@
+services:
+ app:
+ image: nginx:alpine
+ volumes_from:
+ - db
+
+ db:
+ image: nginx:alpine
+ volumes:
+ - /var/data
diff --git a/pkg/e2e/fixtures/orphans/.env b/pkg/e2e/fixtures/orphans/.env
new file mode 100644
index 00000000000..717e3306ba7
--- /dev/null
+++ b/pkg/e2e/fixtures/orphans/.env
@@ -0,0 +1 @@
+COMPOSE_REMOVE_ORPHANS=true
diff --git a/pkg/e2e/fixtures/orphans/compose.yaml b/pkg/e2e/fixtures/orphans/compose.yaml
new file mode 100644
index 00000000000..33dbac0d26a
--- /dev/null
+++ b/pkg/e2e/fixtures/orphans/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ orphan:
+ profiles: [run]
+ image: alpine
+ command: echo hello
+ test:
+ image: nginx:alpine
diff --git a/pkg/e2e/fixtures/pause/compose.yaml b/pkg/e2e/fixtures/pause/compose.yaml
new file mode 100644
index 00000000000..615fcad5883
--- /dev/null
+++ b/pkg/e2e/fixtures/pause/compose.yaml
@@ -0,0 +1,17 @@
+services:
+ a:
+ image: nginx:alpine
+ ports: [80]
+ healthcheck:
+ test: wget --spider -S -T1 http://localhost:80
+ interval: 1s
+ timeout: 1s
+ b:
+ image: nginx:alpine
+ ports: [80]
+ depends_on:
+ - a
+ healthcheck:
+ test: wget --spider -S -T1 http://localhost:80
+ interval: 1s
+ timeout: 1s
diff --git a/pkg/e2e/fixtures/port-range/compose.yaml b/pkg/e2e/fixtures/port-range/compose.yaml
new file mode 100644
index 00000000000..65f6fde6184
--- /dev/null
+++ b/pkg/e2e/fixtures/port-range/compose.yaml
@@ -0,0 +1,16 @@
+services:
+ a:
+ image: nginx:alpine
+ scale: 5
+ ports:
+ - "6005-6015:80"
+
+ b:
+ image: nginx:alpine
+ ports:
+ - 80
+
+ c:
+ image: nginx:alpine
+ ports:
+ - 80
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/profiles/compose.yaml b/pkg/e2e/fixtures/profiles/compose.yaml
new file mode 100644
index 00000000000..144cd3cd79b
--- /dev/null
+++ b/pkg/e2e/fixtures/profiles/compose.yaml
@@ -0,0 +1,8 @@
+services:
+ regular-service:
+ image: nginx:alpine
+
+ profiled-service:
+ image: nginx:alpine
+ profiles:
+ - test-profile
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/profiles/docker-compose.yaml b/pkg/e2e/fixtures/profiles/docker-compose.yaml
new file mode 100644
index 00000000000..134d7bbc9bf
--- /dev/null
+++ b/pkg/e2e/fixtures/profiles/docker-compose.yaml
@@ -0,0 +1,15 @@
+services:
+ foo:
+ container_name: foo_c
+ profiles: [ test ]
+ image: alpine
+ depends_on: [ db ]
+
+ bar:
+ container_name: bar_c
+ profiles: [ test ]
+ image: alpine
+
+ db:
+ container_name: db_c
+ image: alpine
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/profiles/test-profile.env b/pkg/e2e/fixtures/profiles/test-profile.env
new file mode 100644
index 00000000000..efb732b1ed2
--- /dev/null
+++ b/pkg/e2e/fixtures/profiles/test-profile.env
@@ -0,0 +1 @@
+COMPOSE_PROFILES=test-profile
diff --git a/pkg/e2e/fixtures/project-volume-bind-test/docker-compose.yml b/pkg/e2e/fixtures/project-volume-bind-test/docker-compose.yml
new file mode 100644
index 00000000000..7e1795719df
--- /dev/null
+++ b/pkg/e2e/fixtures/project-volume-bind-test/docker-compose.yml
@@ -0,0 +1,14 @@
+services:
+ frontend:
+ image: nginx
+ container_name: frontend
+ volumes:
+ - project-data:/data
+
+volumes:
+ project-data:
+ driver: local
+ driver_opts:
+ type: none
+ o: bind
+ device: "${TEST_DIR}"
diff --git a/pkg/e2e/fixtures/providers/depends-on-multiple-providers.yaml b/pkg/e2e/fixtures/providers/depends-on-multiple-providers.yaml
new file mode 100644
index 00000000000..6faec7cdc5e
--- /dev/null
+++ b/pkg/e2e/fixtures/providers/depends-on-multiple-providers.yaml
@@ -0,0 +1,21 @@
+services:
+ test:
+ image: alpine
+ command: env
+ depends_on:
+ - provider1
+ - provider2
+ provider1:
+ provider:
+ type: example-provider
+ options:
+ name: provider1
+ type: test1
+ size: 1
+ provider2:
+ provider:
+ type: example-provider
+ options:
+ name: provider2
+ type: test2
+ size: 2
diff --git a/pkg/e2e/fixtures/ps-test/compose.yaml b/pkg/e2e/fixtures/ps-test/compose.yaml
new file mode 100644
index 00000000000..08781e6a2a6
--- /dev/null
+++ b/pkg/e2e/fixtures/ps-test/compose.yaml
@@ -0,0 +1,12 @@
+services:
+ nginx:
+ image: nginx:latest
+ expose:
+ - '80'
+ - '443'
+ - '8080'
+ busybox:
+ image: busybox
+ command: busybox httpd -f -p 8000
+ ports:
+ - '127.0.0.1:8001:8000'
diff --git a/scripts/validate/template/bash.txt b/pkg/e2e/fixtures/publish/Dockerfile
similarity index 96%
rename from scripts/validate/template/bash.txt
rename to pkg/e2e/fixtures/publish/Dockerfile
index 31e00001cf2..b17b6b06175 100644
--- a/scripts/validate/template/bash.txt
+++ b/pkg/e2e/fixtures/publish/Dockerfile
@@ -11,3 +11,5 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+FROM alpine:latest
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/common.yaml b/pkg/e2e/fixtures/publish/common.yaml
new file mode 100644
index 00000000000..c8022b46873
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/common.yaml
@@ -0,0 +1,3 @@
+services:
+ foo:
+ image: bar
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/compose-bind-mount.yml b/pkg/e2e/fixtures/publish/compose-bind-mount.yml
new file mode 100644
index 00000000000..ecfc700d1c6
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-bind-mount.yml
@@ -0,0 +1,5 @@
+services:
+ serviceA:
+ image: a
+ volumes:
+ - .:/user-data
diff --git a/pkg/e2e/fixtures/publish/compose-build-only.yml b/pkg/e2e/fixtures/publish/compose-build-only.yml
new file mode 100644
index 00000000000..e4736d983df
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-build-only.yml
@@ -0,0 +1,9 @@
+services:
+ serviceA:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ serviceB:
+ build:
+ context: .
+ dockerfile: Dockerfile
diff --git a/pkg/e2e/fixtures/publish/compose-env-file.yml b/pkg/e2e/fixtures/publish/compose-env-file.yml
new file mode 100644
index 00000000000..b438c71daba
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-env-file.yml
@@ -0,0 +1,7 @@
+services:
+ serviceA:
+ image: "alpine:3.12"
+ env_file:
+ - publish.env
+ serviceB:
+ image: "alpine:3.12"
diff --git a/pkg/e2e/fixtures/publish/compose-environment.yml b/pkg/e2e/fixtures/publish/compose-environment.yml
new file mode 100644
index 00000000000..27e3a4b31bf
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-environment.yml
@@ -0,0 +1,7 @@
+services:
+ serviceA:
+ image: "alpine:3.12"
+ environment:
+ - "FOO=bar"
+ serviceB:
+ image: "alpine:3.12"
diff --git a/pkg/e2e/fixtures/publish/compose-local-include.yml b/pkg/e2e/fixtures/publish/compose-local-include.yml
new file mode 100644
index 00000000000..1af74e926eb
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-local-include.yml
@@ -0,0 +1,6 @@
+include:
+ - common.yaml
+
+services:
+ test:
+ image: test
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/compose-multi-env-config.yml b/pkg/e2e/fixtures/publish/compose-multi-env-config.yml
new file mode 100644
index 00000000000..35a75eab36a
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-multi-env-config.yml
@@ -0,0 +1,11 @@
+services:
+ serviceA:
+ image: "alpine:3.12"
+ environment:
+ - "FOO=bar"
+ serviceB:
+ image: "alpine:3.12"
+ env_file:
+ - publish.env
+ environment:
+ - "BAR=baz"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/compose-sensitive.yml b/pkg/e2e/fixtures/publish/compose-sensitive.yml
new file mode 100644
index 00000000000..68dd59b83e7
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-sensitive.yml
@@ -0,0 +1,20 @@
+services:
+ serviceA:
+ image: "alpine:3.12"
+ environment:
+ - AWS_ACCESS_KEY_ID=A3TX1234567890ABCDEF
+ - AWS_SECRET_ACCESS_KEY=aws"12345+67890/abcdefghijklm+NOPQRSTUVWXYZ+"
+ configs:
+ - myconfig
+ serviceB:
+ image: "alpine:3.12"
+ env_file:
+ - publish-sensitive.env
+ secrets:
+ - mysecret
+configs:
+ myconfig:
+ file: config.txt
+secrets:
+ mysecret:
+ file: secret.txt
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/compose-with-extends.yml b/pkg/e2e/fixtures/publish/compose-with-extends.yml
new file mode 100644
index 00000000000..f8c1349f5f5
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/compose-with-extends.yml
@@ -0,0 +1,5 @@
+services:
+ test:
+ extends:
+ file: common.yaml
+ service: foo
diff --git a/pkg/e2e/fixtures/publish/config.txt b/pkg/e2e/fixtures/publish/config.txt
new file mode 100644
index 00000000000..32501ce3ed9
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/config.txt
@@ -0,0 +1 @@
+eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/oci/compose-override.yaml b/pkg/e2e/fixtures/publish/oci/compose-override.yaml
new file mode 100644
index 00000000000..c8947e610a8
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/oci/compose-override.yaml
@@ -0,0 +1,3 @@
+services:
+ app:
+ env_file: test.env
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/oci/compose.yaml b/pkg/e2e/fixtures/publish/oci/compose.yaml
new file mode 100644
index 00000000000..369094bec14
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/oci/compose.yaml
@@ -0,0 +1,5 @@
+services:
+ app:
+ extends:
+ file: extends.yaml
+ service: test
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/oci/extends.yaml b/pkg/e2e/fixtures/publish/oci/extends.yaml
new file mode 100644
index 00000000000..9184173d087
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/oci/extends.yaml
@@ -0,0 +1,3 @@
+services:
+ test:
+ image: alpine
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/oci/test.env b/pkg/e2e/fixtures/publish/oci/test.env
new file mode 100644
index 00000000000..6e1f61b59ea
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/oci/test.env
@@ -0,0 +1 @@
+HELLO=WORLD
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/publish-sensitive.env b/pkg/e2e/fixtures/publish/publish-sensitive.env
new file mode 100644
index 00000000000..6ced3351048
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/publish-sensitive.env
@@ -0,0 +1 @@
+GITHUB_TOKEN=ghp_1234567890abcdefghijklmnopqrstuvwxyz
diff --git a/pkg/e2e/fixtures/publish/publish.env b/pkg/e2e/fixtures/publish/publish.env
new file mode 100644
index 00000000000..62eddb614c6
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/publish.env
@@ -0,0 +1,2 @@
+FOO=bar
+QUIX=
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/publish/secret.txt b/pkg/e2e/fixtures/publish/secret.txt
new file mode 100644
index 00000000000..5df0a6eeac3
--- /dev/null
+++ b/pkg/e2e/fixtures/publish/secret.txt
@@ -0,0 +1,3 @@
+-----BEGIN DSA PRIVATE KEY-----
+wxyz+ABC=
+-----END DSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/recreate-volumes/bind.yaml b/pkg/e2e/fixtures/recreate-volumes/bind.yaml
new file mode 100644
index 00000000000..a67244ca530
--- /dev/null
+++ b/pkg/e2e/fixtures/recreate-volumes/bind.yaml
@@ -0,0 +1,5 @@
+services:
+ app:
+ image: alpine
+ volumes:
+ - .:/my_vol
diff --git a/pkg/e2e/fixtures/recreate-volumes/compose.yaml b/pkg/e2e/fixtures/recreate-volumes/compose.yaml
new file mode 100644
index 00000000000..e0e40c721b7
--- /dev/null
+++ b/pkg/e2e/fixtures/recreate-volumes/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ app:
+ image: alpine
+ volumes:
+ - my_vol:/my_vol
+
+volumes:
+ my_vol:
+ labels:
+ foo: bar
diff --git a/pkg/e2e/fixtures/recreate-volumes/compose2.yaml b/pkg/e2e/fixtures/recreate-volumes/compose2.yaml
new file mode 100644
index 00000000000..96a073f0516
--- /dev/null
+++ b/pkg/e2e/fixtures/recreate-volumes/compose2.yaml
@@ -0,0 +1,10 @@
+services:
+ app:
+ image: alpine
+ volumes:
+ - my_vol:/my_vol
+
+volumes:
+ my_vol:
+ labels:
+ foo: zot
diff --git a/pkg/e2e/fixtures/resources/compose.yaml b/pkg/e2e/fixtures/resources/compose.yaml
new file mode 100644
index 00000000000..4823688803e
--- /dev/null
+++ b/pkg/e2e/fixtures/resources/compose.yaml
@@ -0,0 +1,5 @@
+volumes:
+ my_vol: {}
+
+networks:
+ my_net: {}
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/restart-test/compose-depends-on.yaml b/pkg/e2e/fixtures/restart-test/compose-depends-on.yaml
new file mode 100644
index 00000000000..092d862f698
--- /dev/null
+++ b/pkg/e2e/fixtures/restart-test/compose-depends-on.yaml
@@ -0,0 +1,27 @@
+services:
+ with-restart:
+ image: nginx:alpine
+ init: true
+ command: tail -f /dev/null
+ stop_signal: SIGTERM
+ depends_on:
+ nginx: {condition: service_healthy, restart: true}
+
+ no-restart:
+ image: nginx:alpine
+ init: true
+ command: tail -f /dev/null
+ stop_signal: SIGTERM
+ depends_on:
+ nginx: { condition: service_healthy }
+
+ nginx:
+ image: nginx:alpine
+ labels:
+ TEST: ${LABEL:-test}
+ stop_signal: SIGTERM
+ healthcheck:
+ test: "echo | nc -w 5 localhost:80"
+ interval: 2s
+ timeout: 1s
+ retries: 10
diff --git a/pkg/e2e/fixtures/restart-test/compose.yaml b/pkg/e2e/fixtures/restart-test/compose.yaml
index 34038d5c06b..92c28d3a706 100644
--- a/pkg/e2e/fixtures/restart-test/compose.yaml
+++ b/pkg/e2e/fixtures/restart-test/compose.yaml
@@ -1,4 +1,12 @@
services:
restart:
image: alpine
+ init: true
+ command: ash -c "if [[ -f /tmp/restart.lock ]] ; then sleep infinity; else touch /tmp/restart.lock; fi"
+
+ test:
+ profiles:
+ - test
+ image: alpine
+ init: true
command: ash -c "if [[ -f /tmp/restart.lock ]] ; then sleep infinity; else touch /tmp/restart.lock; fi"
diff --git a/pkg/e2e/fixtures/run-test/build-once-nested.yaml b/pkg/e2e/fixtures/run-test/build-once-nested.yaml
new file mode 100644
index 00000000000..4972db5a7bb
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/build-once-nested.yaml
@@ -0,0 +1,32 @@
+services:
+ # Database service with build
+ db:
+ pull_policy: build
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ RUN echo "DB built at $(date)" > /db-build.txt
+ CMD sleep 3600
+
+ # API service that depends on db
+ api:
+ pull_policy: build
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ RUN echo "API built at $(date)" > /api-build.txt
+ CMD sleep 3600
+ depends_on:
+ - db
+
+ # App service that depends on api (which depends on db)
+ app:
+ pull_policy: build
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ RUN echo "App built at $(date)" > /app-build.txt
+ CMD echo "App running"
+ depends_on:
+ - api
+
diff --git a/pkg/e2e/fixtures/run-test/build-once-no-deps.yaml b/pkg/e2e/fixtures/run-test/build-once-no-deps.yaml
new file mode 100644
index 00000000000..36f4258b380
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/build-once-no-deps.yaml
@@ -0,0 +1,10 @@
+services:
+ # Simple service with no dependencies
+ simple:
+ pull_policy: build
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ RUN echo "Simple built at $(date)" > /build.txt
+ CMD echo "Simple service"
+
diff --git a/pkg/e2e/fixtures/run-test/build-once.yaml b/pkg/e2e/fixtures/run-test/build-once.yaml
new file mode 100644
index 00000000000..7a6f84dbc42
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/build-once.yaml
@@ -0,0 +1,18 @@
+services:
+ # Service with pull_policy: build to ensure it always rebuilds
+ # This is the key to testing the bug - without the fix, this would build twice
+ nginx:
+ pull_policy: build
+ build:
+ dockerfile_inline: |
+ FROM alpine
+ RUN echo "Nginx built at $(date)" > /build-time.txt
+ CMD sleep 3600
+
+ # Service that depends on nginx
+ curl:
+ image: alpine
+ depends_on:
+ - nginx
+ command: echo "curl service"
+
diff --git a/pkg/e2e/fixtures/run-test/compose.yaml b/pkg/e2e/fixtures/run-test/compose.yaml
index 0168dc240ba..aef80119039 100644
--- a/pkg/e2e/fixtures/run-test/compose.yaml
+++ b/pkg/e2e/fixtures/run-test/compose.yaml
@@ -1,4 +1,3 @@
-version: '3.8'
services:
back:
image: alpine
@@ -17,6 +16,14 @@ services:
image: nginx:alpine
networks:
- frontnet
+ build:
+ build:
+ dockerfile_inline: "FROM base"
+ additional_contexts:
+ base: "service:build_base"
+ build_base:
+ build:
+ dockerfile_inline: "FROM alpine"
networks:
frontnet:
backnet:
diff --git a/pkg/e2e/fixtures/run-test/deps.yaml b/pkg/e2e/fixtures/run-test/deps.yaml
new file mode 100644
index 00000000000..6e0e394a32f
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/deps.yaml
@@ -0,0 +1,13 @@
+services:
+ service_a:
+ image: bash
+ command: echo "a"
+ depends_on:
+ - shared_dep
+ service_b:
+ image: bash
+ command: echo "b"
+ depends_on:
+ - shared_dep
+ shared_dep:
+ image: bash
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/run-test/orphan.yaml b/pkg/e2e/fixtures/run-test/orphan.yaml
index a2aedb3736d..b059acc9759 100644
--- a/pkg/e2e/fixtures/run-test/orphan.yaml
+++ b/pkg/e2e/fixtures/run-test/orphan.yaml
@@ -1,4 +1,3 @@
-version: '3.8'
services:
simple:
image: alpine
diff --git a/pkg/e2e/fixtures/run-test/piped-test.yaml b/pkg/e2e/fixtures/run-test/piped-test.yaml
new file mode 100644
index 00000000000..247bd923ace
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/piped-test.yaml
@@ -0,0 +1,9 @@
+services:
+ piped-test:
+ image: alpine
+ command: cat
+ # Service that will receive piped input and echo it back
+ tty-test:
+ image: alpine
+ command: sh -c "if [ -t 0 ]; then echo 'TTY detected'; else echo 'No TTY detected'; fi"
+ # Service to test TTY detection
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/run-test/ports.yaml b/pkg/e2e/fixtures/run-test/ports.yaml
new file mode 100644
index 00000000000..f6f93aa10e1
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/ports.yaml
@@ -0,0 +1,5 @@
+services:
+ back:
+ image: alpine
+ ports:
+ - 8082:80
diff --git a/pkg/e2e/fixtures/run-test/pull.yaml b/pkg/e2e/fixtures/run-test/pull.yaml
new file mode 100644
index 00000000000..223fecd50d7
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/pull.yaml
@@ -0,0 +1,4 @@
+services:
+ backend:
+ image: nginx
+ command: nginx -t
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/run-test/quiet-pull.yaml b/pkg/e2e/fixtures/run-test/quiet-pull.yaml
new file mode 100644
index 00000000000..922676363f5
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/quiet-pull.yaml
@@ -0,0 +1,3 @@
+services:
+ backend:
+ image: hello-world
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/run-test/run.env b/pkg/e2e/fixtures/run-test/run.env
new file mode 100644
index 00000000000..6ac867af71b
--- /dev/null
+++ b/pkg/e2e/fixtures/run-test/run.env
@@ -0,0 +1 @@
+FOO=BAR
diff --git a/pkg/e2e/fixtures/scale/Dockerfile b/pkg/e2e/fixtures/scale/Dockerfile
new file mode 100644
index 00000000000..7f341f9525b
--- /dev/null
+++ b/pkg/e2e/fixtures/scale/Dockerfile
@@ -0,0 +1,17 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM nginx:alpine
+ARG FOO
+LABEL FOO=$FOO
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/scale/build.yaml b/pkg/e2e/fixtures/scale/build.yaml
new file mode 100644
index 00000000000..cd109c7a849
--- /dev/null
+++ b/pkg/e2e/fixtures/scale/build.yaml
@@ -0,0 +1,3 @@
+services:
+ test:
+ build: .
diff --git a/pkg/e2e/fixtures/scale/compose.yaml b/pkg/e2e/fixtures/scale/compose.yaml
new file mode 100644
index 00000000000..619630876b1
--- /dev/null
+++ b/pkg/e2e/fixtures/scale/compose.yaml
@@ -0,0 +1,17 @@
+services:
+ back:
+ image: nginx:alpine
+ depends_on:
+ - db
+ db:
+ image: nginx:alpine
+ environment:
+ - MAYBE
+ front:
+ image: nginx:alpine
+ deploy:
+ replicas: 2
+ dbadmin:
+ image: nginx:alpine
+ deploy:
+ replicas: 0
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/sentences/compose.yaml b/pkg/e2e/fixtures/sentences/compose.yaml
index 7916d1f4b23..3cabccab16d 100644
--- a/pkg/e2e/fixtures/sentences/compose.yaml
+++ b/pkg/e2e/fixtures/sentences/compose.yaml
@@ -1,12 +1,15 @@
services:
db:
image: gtardif/sentences-db
+ init: true
words:
image: gtardif/sentences-api
+ init: true
ports:
- "95:8080"
web:
image: gtardif/sentences-web
+ init: true
ports:
- "90:80"
labels:
diff --git a/pkg/e2e/fixtures/simple-build-test/compose-interpolate.yaml b/pkg/e2e/fixtures/simple-build-test/compose-interpolate.yaml
new file mode 100644
index 00000000000..57d092a9909
--- /dev/null
+++ b/pkg/e2e/fixtures/simple-build-test/compose-interpolate.yaml
@@ -0,0 +1,5 @@
+services:
+ nginx:
+ build:
+ context: nginx-build
+ dockerfile: ${MYVAR}
diff --git a/pkg/e2e/fixtures/simple-composefile/id.yaml b/pkg/e2e/fixtures/simple-composefile/id.yaml
new file mode 100644
index 00000000000..67ac13f7308
--- /dev/null
+++ b/pkg/e2e/fixtures/simple-composefile/id.yaml
@@ -0,0 +1,3 @@
+services:
+ test:
+ image: ${ID:?ID variable must be set}
diff --git a/pkg/e2e/fixtures/start-fail/compose.yaml b/pkg/e2e/fixtures/start-fail/compose.yaml
index ed7d9a122e8..4c88576c872 100644
--- a/pkg/e2e/fixtures/start-fail/compose.yaml
+++ b/pkg/e2e/fixtures/start-fail/compose.yaml
@@ -1,6 +1,7 @@
services:
fail:
image: alpine
+ init: true
command: sleep infinity
healthcheck:
test: "false"
@@ -8,6 +9,7 @@ services:
retries: 3
depends:
image: alpine
+ init: true
command: sleep infinity
depends_on:
fail:
diff --git a/pkg/e2e/fixtures/start-fail/start-depends_on-long-lived.yaml b/pkg/e2e/fixtures/start-fail/start-depends_on-long-lived.yaml
new file mode 100644
index 00000000000..a3c920e0f32
--- /dev/null
+++ b/pkg/e2e/fixtures/start-fail/start-depends_on-long-lived.yaml
@@ -0,0 +1,14 @@
+services:
+ safe:
+ image: 'alpine'
+ init: true
+ command: ['/bin/sh', '-c', 'sleep infinity'] # never exiting
+ failure:
+ image: 'alpine'
+ init: true
+ command: ['/bin/sh', '-c', 'sleep 1 ; echo "exiting with error" ; exit 42']
+ test:
+ image: 'alpine'
+ init: true
+ command: ['/bin/sh', '-c', 'sleep 99999 ; echo "tests are OK"'] # very long job
+ depends_on: [safe]
diff --git a/pkg/e2e/fixtures/start-stop/other.yaml b/pkg/e2e/fixtures/start-stop/other.yaml
new file mode 100644
index 00000000000..58782726184
--- /dev/null
+++ b/pkg/e2e/fixtures/start-stop/other.yaml
@@ -0,0 +1,5 @@
+services:
+ a-different-one:
+ image: nginx:alpine
+ and-another-one:
+ image: nginx:alpine
diff --git a/pkg/e2e/fixtures/start-stop/start-stop-deps.yaml b/pkg/e2e/fixtures/start-stop/start-stop-deps.yaml
new file mode 100644
index 00000000000..fb1f7fad702
--- /dev/null
+++ b/pkg/e2e/fixtures/start-stop/start-stop-deps.yaml
@@ -0,0 +1,17 @@
+services:
+ another_2:
+ image: nginx:alpine
+ another:
+ image: nginx:alpine
+ depends_on:
+ - another_2
+ dep_2:
+ image: nginx:alpine
+ dep_1:
+ image: nginx:alpine
+ depends_on:
+ - dep_2
+ desired:
+ image: nginx:alpine
+ depends_on:
+ - dep_1
diff --git a/pkg/e2e/fixtures/start_interval/compose.yaml b/pkg/e2e/fixtures/start_interval/compose.yaml
new file mode 100644
index 00000000000..c78a9f43a05
--- /dev/null
+++ b/pkg/e2e/fixtures/start_interval/compose.yaml
@@ -0,0 +1,15 @@
+services:
+ test:
+ image: "nginx"
+ healthcheck:
+ interval: 30s
+ start_period: 10s
+ start_interval: 1s
+ test: "/bin/true"
+
+ error:
+ image: "nginx"
+ healthcheck:
+ interval: 30s
+ start_interval: 1s
+ test: "/bin/true"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/stdout-stderr/compose.yaml b/pkg/e2e/fixtures/stdout-stderr/compose.yaml
new file mode 100644
index 00000000000..53a44b4552c
--- /dev/null
+++ b/pkg/e2e/fixtures/stdout-stderr/compose.yaml
@@ -0,0 +1,7 @@
+services:
+ stderr:
+ image: alpine
+ init: true
+ command: /bin/ash /log_to_stderr.sh
+ volumes:
+ - ./log_to_stderr.sh:/log_to_stderr.sh
diff --git a/pkg/e2e/fixtures/stdout-stderr/log_to_stderr.sh b/pkg/e2e/fixtures/stdout-stderr/log_to_stderr.sh
new file mode 100755
index 00000000000..f015ca89bea
--- /dev/null
+++ b/pkg/e2e/fixtures/stdout-stderr/log_to_stderr.sh
@@ -0,0 +1,16 @@
+# Copyright 2020 Docker Compose CLI authors
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+>&2 echo "log to stderr"
+echo "log to stdout"
diff --git a/pkg/e2e/fixtures/stop/compose.yaml b/pkg/e2e/fixtures/stop/compose.yaml
new file mode 100644
index 00000000000..f81462ae321
--- /dev/null
+++ b/pkg/e2e/fixtures/stop/compose.yaml
@@ -0,0 +1,9 @@
+services:
+ service1:
+ image: alpine
+ command: /bin/true
+ service2:
+ image: alpine
+ command: ping -c 2 localhost
+ pre_stop:
+ - command: echo "stop hook running..."
diff --git a/pkg/e2e/fixtures/switch-volumes/compose.yaml b/pkg/e2e/fixtures/switch-volumes/compose.yaml
new file mode 100644
index 00000000000..9da0dcba175
--- /dev/null
+++ b/pkg/e2e/fixtures/switch-volumes/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ app:
+ image: alpine
+ volumes:
+ - my_vol:/my_vol
+
+volumes:
+ my_vol:
+ external: true
+ name: test_external_volume
diff --git a/pkg/e2e/fixtures/switch-volumes/compose2.yaml b/pkg/e2e/fixtures/switch-volumes/compose2.yaml
new file mode 100644
index 00000000000..6d52097f925
--- /dev/null
+++ b/pkg/e2e/fixtures/switch-volumes/compose2.yaml
@@ -0,0 +1,10 @@
+services:
+ app:
+ image: alpine
+ volumes:
+ - my_vol:/my_vol
+
+volumes:
+ my_vol:
+ external: true
+ name: test_external_volume_2
diff --git a/pkg/e2e/fixtures/ups-deps-stop/compose.yaml b/pkg/e2e/fixtures/ups-deps-stop/compose.yaml
new file mode 100644
index 00000000000..c99087f65a7
--- /dev/null
+++ b/pkg/e2e/fixtures/ups-deps-stop/compose.yaml
@@ -0,0 +1,11 @@
+services:
+ dependency:
+ image: alpine
+ init: true
+ command: /bin/sh -c 'while true; do echo "hello dependency"; sleep 1; done'
+
+ app:
+ depends_on: ['dependency']
+ image: alpine
+ init: true
+ command: /bin/sh -c 'while true; do echo "hello app"; sleep 1; done'
diff --git a/pkg/e2e/fixtures/ups-deps-stop/orphan.yaml b/pkg/e2e/fixtures/ups-deps-stop/orphan.yaml
new file mode 100644
index 00000000000..69e50e39ca7
--- /dev/null
+++ b/pkg/e2e/fixtures/ups-deps-stop/orphan.yaml
@@ -0,0 +1,5 @@
+services:
+ orphan:
+ image: alpine
+ init: true
+ command: /bin/sh -c 'while true; do echo "hello orphan"; sleep 1; done'
diff --git a/pkg/e2e/fixtures/volumes/compose.yaml b/pkg/e2e/fixtures/volumes/compose.yaml
new file mode 100644
index 00000000000..4aad0482b5d
--- /dev/null
+++ b/pkg/e2e/fixtures/volumes/compose.yaml
@@ -0,0 +1,10 @@
+services:
+ with_image:
+ image: alpine
+ command: "ls -al /mnt/image"
+ volumes:
+ - type: image
+ source: nginx:alpine
+ target: /mnt/image
+ image:
+ subpath: usr/share/nginx/html/
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/wait/compose.yaml b/pkg/e2e/fixtures/wait/compose.yaml
new file mode 100644
index 00000000000..1a001e6fa87
--- /dev/null
+++ b/pkg/e2e/fixtures/wait/compose.yaml
@@ -0,0 +1,11 @@
+services:
+ faster:
+ image: alpine
+ command: sleep 2
+ slower:
+ image: alpine
+ command: sleep 5
+ infinity:
+ image: alpine
+ command: sleep infinity
+
diff --git a/pkg/e2e/fixtures/watch/compose.yaml b/pkg/e2e/fixtures/watch/compose.yaml
new file mode 100644
index 00000000000..7e5e0d28bf4
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/compose.yaml
@@ -0,0 +1,43 @@
+x-dev: &x-dev
+ watch:
+ - action: sync
+ path: ./data
+ target: /app/data
+ ignore:
+ - '*.foo'
+ - ./ignored
+ - action: sync+restart
+ path: ./config
+ target: /app/config
+
+services:
+ alpine:
+ build:
+ dockerfile_inline: |-
+ FROM alpine
+ RUN mkdir -p /app/data
+ RUN mkdir -p /app/config
+ init: true
+ command: sleep infinity
+ develop: *x-dev
+ busybox:
+ build:
+ dockerfile_inline: |-
+ FROM busybox
+ RUN mkdir -p /app/data
+ RUN mkdir -p /app/config
+ init: true
+ command: sleep infinity
+ develop: *x-dev
+ debian:
+ build:
+ dockerfile_inline: |-
+ FROM debian
+ RUN mkdir -p /app/data
+ RUN mkdir -p /app/config
+ init: true
+ command: sleep infinity
+ volumes:
+ - ./dat:/app/dat
+ - ./data-logs:/app/data-logs
+ develop: *x-dev
diff --git a/pkg/e2e/fixtures/watch/config/file.config b/pkg/e2e/fixtures/watch/config/file.config
new file mode 100644
index 00000000000..227b0b6ed7f
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/config/file.config
@@ -0,0 +1 @@
+This is a config file
diff --git a/pkg/e2e/fixtures/watch/dat/meow.dat b/pkg/e2e/fixtures/watch/dat/meow.dat
new file mode 100644
index 00000000000..0dd3d19a01d
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/dat/meow.dat
@@ -0,0 +1 @@
+i am a wannabe cat
diff --git a/pkg/e2e/fixtures/watch/data-logs/server.log b/pkg/e2e/fixtures/watch/data-logs/server.log
new file mode 100644
index 00000000000..b6b65a681d9
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/data-logs/server.log
@@ -0,0 +1 @@
+[INFO] Server started successfully on port 8080
diff --git a/pkg/e2e/fixtures/watch/data/hello.txt b/pkg/e2e/fixtures/watch/data/hello.txt
new file mode 100644
index 00000000000..95d09f2b101
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/data/hello.txt
@@ -0,0 +1 @@
+hello world
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/watch/exec.yaml b/pkg/e2e/fixtures/watch/exec.yaml
new file mode 100644
index 00000000000..9d232ac76b3
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/exec.yaml
@@ -0,0 +1,15 @@
+services:
+ test:
+ build:
+ dockerfile_inline: FROM alpine
+ command: ping localhost
+ volumes:
+ - /data
+ develop:
+ watch:
+ - path: .
+ target: /data
+ initial_sync: true
+ action: sync+exec
+ exec:
+ command: echo "SUCCESS"
\ No newline at end of file
diff --git a/pkg/e2e/fixtures/watch/include.yaml b/pkg/e2e/fixtures/watch/include.yaml
new file mode 100644
index 00000000000..ccd9d45042b
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/include.yaml
@@ -0,0 +1,12 @@
+services:
+ a:
+ build:
+ dockerfile_inline: |
+ FROM nginx
+ RUN mkdir /data/
+ develop:
+ watch:
+ - path: .
+ include: A.*
+ target: /data/
+ action: sync
diff --git a/pkg/e2e/fixtures/watch/rebuild.yaml b/pkg/e2e/fixtures/watch/rebuild.yaml
new file mode 100644
index 00000000000..561659a6ca4
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/rebuild.yaml
@@ -0,0 +1,31 @@
+services:
+ a:
+ build:
+ dockerfile_inline: |
+ FROM nginx
+ RUN mkdir /data
+ COPY test /data/a
+ develop:
+ watch:
+ - path: test
+ action: rebuild
+ b:
+ build:
+ dockerfile_inline: |
+ FROM nginx
+ RUN mkdir /data
+ COPY test /data/b
+ develop:
+ watch:
+ - path: test
+ action: rebuild
+ c:
+ build:
+ dockerfile_inline: |
+ FROM nginx
+ RUN mkdir /data
+ COPY test /data/c
+ develop:
+ watch:
+ - path: test
+ action: rebuild
diff --git a/pkg/e2e/fixtures/watch/with-external-network.yaml b/pkg/e2e/fixtures/watch/with-external-network.yaml
new file mode 100644
index 00000000000..e9c948920c0
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/with-external-network.yaml
@@ -0,0 +1,19 @@
+
+services:
+ ext-alpine:
+ build:
+ dockerfile_inline: |-
+ FROM alpine
+ init: true
+ command: sleep infinity
+ develop:
+ watch:
+ - action: rebuild
+ path: .env
+ networks:
+ - external_network_test
+
+networks:
+ external_network_test:
+ name: e2e-watch-external_network_test
+ external: true
diff --git a/pkg/e2e/fixtures/watch/x-initialSync.yaml b/pkg/e2e/fixtures/watch/x-initialSync.yaml
new file mode 100644
index 00000000000..6a954bd6aee
--- /dev/null
+++ b/pkg/e2e/fixtures/watch/x-initialSync.yaml
@@ -0,0 +1,15 @@
+services:
+ test:
+ build:
+ dockerfile_inline: FROM alpine
+ command: ping localhost
+ volumes:
+ - /data
+ develop:
+ watch:
+ - path: .
+ target: /data
+ action: sync+exec
+ exec:
+ command: echo "SUCCESS"
+ x-initialSync: true
\ No newline at end of file
diff --git a/pkg/e2e/framework.go b/pkg/e2e/framework.go
index d093fda3997..a4624d1de8d 100644
--- a/pkg/e2e/framework.go
+++ b/pkg/e2e/framework.go
@@ -17,24 +17,26 @@
package e2e
import (
+ "encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"net/http"
"os"
- "path"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
- "github.com/docker/compose/v2/cmd/compose"
- "github.com/pkg/errors"
+ cp "github.com/otiai10/copy"
+ "github.com/stretchr/testify/require"
"gotest.tools/v3/assert"
- is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/icmd"
"gotest.tools/v3/poll"
+
+ "github.com/docker/compose/v5/cmd/compose"
)
var (
@@ -44,66 +46,149 @@ var (
// DockerComposeExecutableName is the OS dependent Docker CLI binary name
DockerComposeExecutableName = "docker-" + compose.PluginName
- // DockerScanExecutableName is the OS dependent Docker CLI binary name
+ // DockerScanExecutableName is the OS dependent Docker Scan plugin binary name
DockerScanExecutableName = "docker-scan"
+
+ // DockerBuildxExecutableName is the OS dependent Buildx plugin binary name
+ DockerBuildxExecutableName = "docker-buildx"
+
+ // DockerModelExecutableName is the OS dependent Docker Model plugin binary name
+ DockerModelExecutableName = "docker-model"
+
+ // WindowsExecutableSuffix is the Windows executable suffix
+ WindowsExecutableSuffix = ".exe"
)
func init() {
if runtime.GOOS == "windows" {
- DockerExecutableName = DockerExecutableName + ".exe"
- DockerComposeExecutableName = DockerComposeExecutableName + ".exe"
- DockerScanExecutableName = DockerScanExecutableName + ".exe"
+ DockerExecutableName += WindowsExecutableSuffix
+ DockerComposeExecutableName += WindowsExecutableSuffix
+ DockerScanExecutableName += WindowsExecutableSuffix
+ DockerBuildxExecutableName += WindowsExecutableSuffix
}
}
-// E2eCLI is used to wrap the CLI for end to end testing
-// nolint stutter
-type E2eCLI struct {
- BinDir string
+// CLI is used to wrap the CLI for end to end testing
+type CLI struct {
+ // ConfigDir for Docker configuration (set as DOCKER_CONFIG)
ConfigDir string
- test *testing.T
+
+ // HomeDir for tools that look for user files (set as HOME)
+ HomeDir string
+
+ // env overrides to apply to every invoked command
+ //
+ // To populate, use WithEnv when creating a CLI instance.
+ env []string
}
-// NewParallelE2eCLI returns a configured TestE2eCLI with t.Parallel() set
-func NewParallelE2eCLI(t *testing.T, binDir string) *E2eCLI {
+// CLIOption to customize behavior for all commands for a CLI instance.
+type CLIOption func(c *CLI)
+
+// NewParallelCLI marks the parent test as parallel and returns a CLI instance
+// suitable for usage across child tests.
+func NewParallelCLI(t *testing.T, opts ...CLIOption) *CLI {
+ t.Helper()
t.Parallel()
- return newE2eCLI(t, binDir)
+ return NewCLI(t, opts...)
+}
+
+// NewCLI creates a CLI instance for running E2E tests.
+func NewCLI(t testing.TB, opts ...CLIOption) *CLI {
+ t.Helper()
+
+ configDir := t.TempDir()
+ copyLocalConfig(t, configDir)
+ initializePlugins(t, configDir)
+ initializeContextDir(t, configDir)
+
+ c := &CLI{
+ ConfigDir: configDir,
+ HomeDir: t.TempDir(),
+ }
+
+ for _, opt := range opts {
+ opt(c)
+ }
+ c.RunDockerComposeCmdNoCheck(t, "version")
+ return c
+}
+
+// WithEnv sets environment variables that will be passed to commands.
+func WithEnv(env ...string) CLIOption {
+ return func(c *CLI) {
+ c.env = append(c.env, env...)
+ }
}
-func newE2eCLI(t *testing.T, binDir string) *E2eCLI {
- d, err := ioutil.TempDir("", "")
- assert.Check(t, is.Nil(err))
+func copyLocalConfig(t testing.TB, configDir string) {
+ t.Helper()
+
+ // copy local config.json if exists
+ localConfig := filepath.Join(os.Getenv("HOME"), ".docker", "config.json")
+ // if no config present just continue
+ if _, err := os.Stat(localConfig); err == nil {
+ // copy the local config.json to the test config dir
+ CopyFile(t, localConfig, filepath.Join(configDir, "config.json"))
+ }
+}
+// initializePlugins copies the necessary plugin files to the temporary config
+// directory for the test.
+func initializePlugins(t testing.TB, configDir string) {
t.Cleanup(func() {
if t.Failed() {
- conf, _ := ioutil.ReadFile(filepath.Join(d, "config.json"))
- t.Errorf("Config: %s\n", string(conf))
- t.Error("Contents of config dir:")
- for _, p := range dirContents(d) {
- t.Errorf(p)
+ if conf, err := os.ReadFile(filepath.Join(configDir, "config.json")); err == nil {
+ t.Logf("Config: %s\n", string(conf))
+ }
+ t.Log("Contents of config dir:")
+ for _, p := range dirContents(configDir) {
+ t.Logf(" - %s", p)
}
}
- _ = os.RemoveAll(d)
})
- _ = os.MkdirAll(filepath.Join(d, "cli-plugins"), 0755)
- composePlugin, err := findExecutable(DockerComposeExecutableName, []string{"../../bin", "../../../bin"})
- if os.IsNotExist(err) {
- fmt.Println("WARNING: docker-compose cli-plugin not found")
+ require.NoError(t, os.MkdirAll(filepath.Join(configDir, "cli-plugins"), 0o755),
+ "Failed to create cli-plugins directory")
+ composePlugin, err := findExecutable(DockerComposeExecutableName)
+ if errors.Is(err, fs.ErrNotExist) {
+ t.Logf("WARNING: docker-compose cli-plugin not found")
}
+
if err == nil {
- err = CopyFile(composePlugin, filepath.Join(d, "cli-plugins", DockerComposeExecutableName))
+ CopyFile(t, composePlugin, filepath.Join(configDir, "cli-plugins", DockerComposeExecutableName))
+ buildxPlugin, err := findPluginExecutable(DockerBuildxExecutableName)
if err != nil {
- panic(err)
+ t.Logf("WARNING: docker-buildx cli-plugin not found, using default buildx installation.")
+ } else {
+ CopyFile(t, buildxPlugin, filepath.Join(configDir, "cli-plugins", DockerBuildxExecutableName))
}
// We don't need a functional scan plugin, but a valid plugin binary
- err = CopyFile(composePlugin, filepath.Join(d, "cli-plugins", DockerScanExecutableName))
+ CopyFile(t, composePlugin, filepath.Join(configDir, "cli-plugins", DockerScanExecutableName))
+
+ modelPlugin, err := findPluginExecutable(DockerModelExecutableName)
if err != nil {
- panic(err)
+ t.Logf("WARNING: docker-model cli-plugin not found")
+ } else {
+ CopyFile(t, modelPlugin, filepath.Join(configDir, "cli-plugins", DockerModelExecutableName))
}
}
+}
+
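+// initializeContextDir copies the user's Docker contexts directory into the test config directory, if one exists.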
+func initializeContextDir(t testing.TB, configDir string) {
+ dockerUserDir := ".docker/contexts"
+ userDir, err := os.UserHomeDir()
+ require.NoError(t, err, "Failed to get user home directory")
+ userContextsDir := filepath.Join(userDir, dockerUserDir)
+ if checkExists(userContextsDir) {
+ dstContexts := filepath.Join(configDir, "contexts")
+ require.NoError(t, cp.Copy(userContextsDir, dstContexts), "Failed to copy contexts directory")
+ }
+}
- return &E2eCLI{binDir, d, t}
+func checkExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
}
func dirContents(dir string) []string {
@@ -115,109 +200,206 @@ func dirContents(dir string) []string {
return res
}
-func findExecutable(executableName string, paths []string) (string, error) {
- for _, p := range paths {
- bin, err := filepath.Abs(path.Join(p, executableName))
+func findExecutable(executableName string) (string, error) {
+ bin := os.Getenv("COMPOSE_E2E_BIN_PATH")
+ if bin == "" {
+ _, filename, _, _ := runtime.Caller(0)
+ buildPath := filepath.Join(filepath.Dir(filename), "..", "..", "bin", "build")
+ var err error
+ bin, err = filepath.Abs(filepath.Join(buildPath, executableName))
if err != nil {
return "", err
}
+ }
- if _, err := os.Stat(bin); os.IsNotExist(err) {
- continue
- }
-
+ if _, err := os.Stat(bin); err == nil {
return bin, nil
}
+ return "", fmt.Errorf("looking for %q: %w", bin, fs.ErrNotExist)
+}
+
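+// findPluginExecutable looks for a Docker CLI plugin binary in the user's cli-plugins directory and in common system locations.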
+func findPluginExecutable(pluginExecutableName string) (string, error) {
+ dockerUserDir := ".docker/cli-plugins"
+ userDir, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ candidates := []string{
+ filepath.Join(userDir, dockerUserDir),
+ "/usr/local/lib/docker/cli-plugins",
+ "/usr/local/libexec/docker/cli-plugins",
+ "/usr/lib/docker/cli-plugins",
+ "/usr/libexec/docker/cli-plugins",
+ }
+ for _, path := range candidates {
+ bin, err := filepath.Abs(filepath.Join(path, pluginExecutableName))
+ if err != nil {
+ return "", err
+ }
+ if _, err := os.Stat(bin); err == nil {
+ return bin, nil
+ }
+ }
- return "", errors.Wrap(os.ErrNotExist, "executable not found")
+ return "", fmt.Errorf("plugin not found %s: %w", pluginExecutableName, os.ErrNotExist)
}
// CopyFile copies a file from a sourceFile to a destinationFile setting permissions to 0755
-func CopyFile(sourceFile string, destinationFile string) error {
+func CopyFile(t testing.TB, sourceFile string, destinationFile string) {
+ t.Helper()
+
src, err := os.Open(sourceFile)
- if err != nil {
- return err
- }
- // nolint: errcheck
+ require.NoError(t, err, "Failed to open source file: %s")
+ //nolint:errcheck
defer src.Close()
- dst, err := os.OpenFile(destinationFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
- if err != nil {
- return err
- }
- // nolint: errcheck
+ dst, err := os.OpenFile(destinationFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
+ require.NoError(t, err, "Failed to open destination file: %s", destinationFile)
+ //nolint:errcheck
defer dst.Close()
- if _, err = io.Copy(dst, src); err != nil {
- return err
+ _, err = io.Copy(dst, src)
+ require.NoError(t, err, "Failed to copy file: %s", sourceFile)
+}
+
+// BaseEnvironment provides the minimal environment variables used across all
+// Docker / Compose commands.
+func (c *CLI) BaseEnvironment() []string {
+ env := []string{
+ "HOME=" + c.HomeDir,
+ "USER=" + os.Getenv("USER"),
+ "DOCKER_CONFIG=" + c.ConfigDir,
+ "KUBECONFIG=invalid",
+ "PATH=" + os.Getenv("PATH"),
+ }
+ dockerContextEnv, ok := os.LookupEnv("DOCKER_CONTEXT")
+ if ok {
+ env = append(env, "DOCKER_CONTEXT="+dockerContextEnv)
}
- return err
+ if coverdir, ok := os.LookupEnv("GOCOVERDIR"); ok {
+ _, filename, _, _ := runtime.Caller(0)
+ root := filepath.Join(filepath.Dir(filename), "..", "..")
+ coverdir = filepath.Join(root, coverdir)
+ env = append(env, fmt.Sprintf("GOCOVERDIR=%s", coverdir))
+ }
+ return env
}
// NewCmd creates a cmd object configured with the test environment set
-func (c *E2eCLI) NewCmd(command string, args ...string) icmd.Cmd {
- env := append(os.Environ(),
- "DOCKER_CONFIG="+c.ConfigDir,
- "KUBECONFIG=invalid",
- )
+func (c *CLI) NewCmd(command string, args ...string) icmd.Cmd {
+ return icmd.Cmd{
+ Command: append([]string{command}, args...),
+ Env: append(c.BaseEnvironment(), c.env...),
+ }
+}
+
+// NewCmdWithEnv creates a cmd object configured with the test environment plus additional env vars
+func (c *CLI) NewCmdWithEnv(envvars []string, command string, args ...string) icmd.Cmd {
+ // base env -> CLI overrides -> cmd overrides
+ cmdEnv := append(c.BaseEnvironment(), c.env...)
+ cmdEnv = append(cmdEnv, envvars...)
return icmd.Cmd{
Command: append([]string{command}, args...),
- Env: env,
+ Env: cmdEnv,
}
}
// MetricsSocket get the path where test metrics will be sent
-func (c *E2eCLI) MetricsSocket() string {
- return filepath.Join(c.ConfigDir, "./docker-cli.sock")
+func (c *CLI) MetricsSocket() string {
+ return filepath.Join(c.ConfigDir, "docker-cli.sock")
}
// NewDockerCmd creates a docker cmd without running it
-func (c *E2eCLI) NewDockerCmd(args ...string) icmd.Cmd {
+func (c *CLI) NewDockerCmd(t testing.TB, args ...string) icmd.Cmd {
+ t.Helper()
+ for _, arg := range args {
+ if arg == compose.PluginName {
+ t.Fatal("This test called 'RunDockerCmd' for 'compose'. Please prefer 'RunDockerComposeCmd' to be able to test as a plugin and standalone")
+ }
+ }
return c.NewCmd(DockerExecutableName, args...)
}
// RunDockerOrExitError runs a docker command and returns a result
-func (c *E2eCLI) RunDockerOrExitError(args ...string) *icmd.Result {
- fmt.Printf("\t[%s] docker %s\n", c.test.Name(), strings.Join(args, " "))
- return icmd.RunCmd(c.NewDockerCmd(args...))
+func (c *CLI) RunDockerOrExitError(t testing.TB, args ...string) *icmd.Result {
+ t.Helper()
+ t.Logf("\t[%s] docker %s\n", t.Name(), strings.Join(args, " "))
+ return icmd.RunCmd(c.NewDockerCmd(t, args...))
}
// RunCmd runs a command, expects no error and returns a result
-func (c *E2eCLI) RunCmd(args ...string) *icmd.Result {
- fmt.Printf("\t[%s] %s\n", c.test.Name(), strings.Join(args, " "))
- assert.Assert(c.test, len(args) >= 1, "require at least one command in parameters")
+func (c *CLI) RunCmd(t testing.TB, args ...string) *icmd.Result {
+ t.Helper()
+ t.Logf("\t[%s] %s\n", t.Name(), strings.Join(args, " "))
+ assert.Assert(t, len(args) >= 1, "require at least one command in parameters")
res := icmd.RunCmd(c.NewCmd(args[0], args[1:]...))
- res.Assert(c.test, icmd.Success)
+ res.Assert(t, icmd.Success)
+ return res
+}
+
+// RunCmdInDir runs a command in a given dir, expects no error and returns a result
+func (c *CLI) RunCmdInDir(t testing.TB, dir string, args ...string) *icmd.Result {
+ t.Helper()
+ t.Logf("\t[%s] %s\n", t.Name(), strings.Join(args, " "))
+ assert.Assert(t, len(args) >= 1, "require at least one command in parameters")
+ cmd := c.NewCmd(args[0], args[1:]...)
+ cmd.Dir = dir
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Success)
return res
}
// RunDockerCmd runs a docker command, expects no error and returns a result
-func (c *E2eCLI) RunDockerCmd(args ...string) *icmd.Result {
- if len(args) > 0 && args[0] == compose.PluginName {
- c.test.Fatal("This test called 'RunDockerCmd' for 'compose'. Please prefer 'RunDockerComposeCmd' to be able to test as a plugin and standalone")
- }
- res := c.RunDockerOrExitError(args...)
- res.Assert(c.test, icmd.Success)
+func (c *CLI) RunDockerCmd(t testing.TB, args ...string) *icmd.Result {
+ t.Helper()
+ res := c.RunDockerOrExitError(t, args...)
+ res.Assert(t, icmd.Success)
return res
}
// RunDockerComposeCmd runs a docker compose command, expects no error and returns a result
-func (c *E2eCLI) RunDockerComposeCmd(args ...string) *icmd.Result {
- res := c.RunDockerComposeCmdNoCheck(args...)
- res.Assert(c.test, icmd.Success)
+func (c *CLI) RunDockerComposeCmd(t testing.TB, args ...string) *icmd.Result {
+ t.Helper()
+ res := c.RunDockerComposeCmdNoCheck(t, args...)
+ res.Assert(t, icmd.Success)
return res
}
// RunDockerComposeCmdNoCheck runs a docker compose command, don't presume of any expectation and returns a result
-func (c *E2eCLI) RunDockerComposeCmdNoCheck(args ...string) *icmd.Result {
+func (c *CLI) RunDockerComposeCmdNoCheck(t testing.TB, args ...string) *icmd.Result {
+ t.Helper()
+ cmd := c.NewDockerComposeCmd(t, args...)
+ cmd.Stdout = os.Stdout
+ t.Logf("Running command: %s", strings.Join(cmd.Command, " "))
+ return icmd.RunCmd(cmd)
+}
+
+// NewDockerComposeCmd creates a command object for Compose, either in plugin
+// or standalone mode (based on build tags).
+func (c *CLI) NewDockerComposeCmd(t testing.TB, args ...string) icmd.Cmd {
+ t.Helper()
if composeStandaloneMode {
- composeBinary, err := findExecutable(DockerComposeExecutableName, []string{"../../bin", "../../../bin"})
- assert.NilError(c.test, err)
- return icmd.RunCmd(c.NewCmd(composeBinary, args...))
+ return c.NewCmd(ComposeStandalonePath(t), args...)
}
args = append([]string{"compose"}, args...)
- return icmd.RunCmd(c.NewCmd(DockerExecutableName, args...))
+ return c.NewCmd(DockerExecutableName, args...)
+}
+
+// ComposeStandalonePath returns the path to the locally-built Compose
+// standalone binary from the repo.
+//
+// This function will fail the test immediately if invoked when not running
+// in standalone test mode.
+func ComposeStandalonePath(t testing.TB) string {
+ t.Helper()
+ if !composeStandaloneMode {
+ require.Fail(t, "Not running in standalone mode")
+ }
+ composeBinary, err := findExecutable(DockerComposeExecutableName)
+ require.NoError(t, err, "Could not find standalone Compose binary (%q)",
+ DockerComposeExecutableName)
+ return composeBinary
}
// StdoutContains returns a predicate on command result expecting a string in stdout
@@ -227,23 +409,58 @@ func StdoutContains(expected string) func(*icmd.Result) bool {
}
}
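+// IsHealthy returns a predicate that inspects the JSON output of `compose ps` and reports whether the named service is healthy.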
+func IsHealthy(service string) func(res *icmd.Result) bool {
+ return func(res *icmd.Result) bool {
+ type state struct {
+ Name string `json:"name"`
+ Health string `json:"health"`
+ }
+
+ decoder := json.NewDecoder(strings.NewReader(res.Stdout()))
+ for decoder.More() {
+ ps := state{}
+ err := decoder.Decode(&ps)
+ if err != nil {
+ return false
+ }
+ if ps.Name == service && ps.Health == "healthy" {
+ return true
+ }
+ }
+ return false
+ }
+}
+
// WaitForCmdResult try to execute a cmd until resulting output matches given predicate
-func (c *E2eCLI) WaitForCmdResult(command icmd.Cmd, predicate func(*icmd.Result) bool, timeout time.Duration, delay time.Duration) {
- assert.Assert(c.test, timeout.Nanoseconds() > delay.Nanoseconds(), "timeout must be greater than delay")
+func (c *CLI) WaitForCmdResult(
+ t testing.TB,
+ command icmd.Cmd,
+ predicate func(*icmd.Result) bool,
+ timeout time.Duration,
+ delay time.Duration,
+) {
+ t.Helper()
+ assert.Assert(t, timeout.Nanoseconds() > delay.Nanoseconds(), "timeout must be greater than delay")
var res *icmd.Result
checkStopped := func(logt poll.LogT) poll.Result {
- fmt.Printf("\t[%s] %s\n", c.test.Name(), strings.Join(command.Command, " "))
+ fmt.Printf("\t[%s] %s\n", t.Name(), strings.Join(command.Command, " "))
res = icmd.RunCmd(command)
if !predicate(res) {
return poll.Continue("Cmd output did not match requirement: %q", res.Combined())
}
return poll.Success()
}
- poll.WaitOn(c.test, checkStopped, poll.WithDelay(delay), poll.WithTimeout(timeout))
+ poll.WaitOn(t, checkStopped, poll.WithDelay(delay), poll.WithTimeout(timeout))
}
// WaitForCondition wait for predicate to execute to true
-func (c *E2eCLI) WaitForCondition(predicate func() (bool, string), timeout time.Duration, delay time.Duration) {
+func (c *CLI) WaitForCondition(
+ t testing.TB,
+ predicate func() (bool, string),
+ timeout time.Duration,
+ delay time.Duration,
+) {
+ t.Helper()
checkStopped := func(logt poll.LogT) poll.Result {
pass, description := predicate()
if !pass {
@@ -251,7 +468,7 @@ func (c *E2eCLI) WaitForCondition(predicate func() (bool, string), timeout time.
}
return poll.Success()
}
- poll.WaitOn(c.test, checkStopped, poll.WithDelay(delay), poll.WithTimeout(timeout))
+ poll.WaitOn(t, checkStopped, poll.WithDelay(delay), poll.WithTimeout(timeout))
}
// Lines split output into lines
@@ -260,9 +477,16 @@ func Lines(output string) []string {
}
// HTTPGetWithRetry performs an HTTP GET on an `endpoint`, using retryDelay also as a request timeout.
-// In the case of an error or the response status is not the expeted one, it retries the same request,
+// In the case of an error or the response status is not the expected one, it retries the same request,
// returning the response body as a string (empty if we could not reach it)
-func HTTPGetWithRetry(t *testing.T, endpoint string, expectedStatus int, retryDelay time.Duration, timeout time.Duration) string {
+func HTTPGetWithRetry(
+ t testing.TB,
+ endpoint string,
+ expectedStatus int,
+ retryDelay time.Duration,
+ timeout time.Duration,
+) string {
+ t.Helper()
var (
r *http.Response
err error
@@ -283,9 +507,14 @@ func HTTPGetWithRetry(t *testing.T, endpoint string, expectedStatus int, retryDe
}
poll.WaitOn(t, checkUp, poll.WithDelay(retryDelay), poll.WithTimeout(timeout))
if r != nil {
- b, err := ioutil.ReadAll(r.Body)
+ b, err := io.ReadAll(r.Body)
assert.NilError(t, err)
return string(b)
}
return ""
}
+
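+// cleanupWithDown tears down the given project with `down -v --remove-orphans`, forwarding any extra args.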
+func (c *CLI) cleanupWithDown(t testing.TB, project string, args ...string) {
+ t.Helper()
+ c.RunDockerComposeCmd(t, append([]string{"-p", project, "down", "-v", "--remove-orphans"}, args...)...)
+}
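
For context, here is a minimal sketch of how a test would consume the reworked helpers above. The test name, fixture path, and service name are hypothetical; the helper calls (NewParallelCLI, WithEnv, RunDockerComposeCmd, NewDockerComposeCmd, WaitForCmdResult, IsHealthy, cleanupWithDown) are the ones introduced in framework.go.

```go
package e2e

import (
	"testing"
	"time"

	"gotest.tools/v3/icmd"
)

// Hypothetical example; the fixture path and service name do not exist in the repo.
func TestExampleUsingFramework(t *testing.T) {
	// Parallel CLI instance; WithEnv adds env vars to every command it runs.
	c := NewParallelCLI(t, WithEnv("FOO=bar"))
	const projectName = "e2e-example"

	t.Cleanup(func() {
		// Runs `down -v --remove-orphans` for the project.
		c.cleanupWithDown(t, projectName)
	})

	// Runs `up -d` (as plugin or standalone binary, per build tags) and
	// fails the test on a non-zero exit code.
	res := c.RunDockerComposeCmd(t, "-f", "./fixtures/example/compose.yaml",
		"--project-name", projectName, "up", "-d")
	res.Assert(t, icmd.Expected{ExitCode: 0})

	// Poll `compose ps --format json` until the "db" service reports healthy
	// (assumes the fixture defines a healthcheck for it).
	ps := c.NewDockerComposeCmd(t, "--project-name", projectName, "ps", "--format", "json")
	c.WaitForCmdResult(t, ps, IsHealthy("db"), 30*time.Second, 1*time.Second)
}
```

Whether the command goes through the `docker compose` plugin or the standalone binary is decided inside NewDockerComposeCmd based on build tags, so individual tests do not need to branch on the mode themselves.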
diff --git a/pkg/e2e/healthcheck_test.go b/pkg/e2e/healthcheck_test.go
new file mode 100644
index 00000000000..227d835a8d3
--- /dev/null
+++ b/pkg/e2e/healthcheck_test.go
@@ -0,0 +1,55 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestStartInterval(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-start-interval"
+
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start_interval/compose.yaml", "--project-name", projectName, "up", "--wait", "-d", "error")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "healthcheck.start_interval requires healthcheck.start_period to be set"})
+
+ timeout := time.After(30 * time.Second)
+ done := make(chan bool)
+ go func() {
+ //nolint:nolintlint,testifylint // helper asserts inside goroutine; acceptable in this e2e test
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/start_interval/compose.yaml", "--project-name", projectName, "up", "--wait", "-d", "test")
+ out := res.Combined()
+ assert.Assert(t, strings.Contains(out, "Healthy"), out)
+ done <- true
+ }()
+
+ select {
+ case <-timeout:
+ t.Fatal("test did not finish in time")
+ case <-done:
+ break
+ }
+}
diff --git a/pkg/e2e/hooks_test.go b/pkg/e2e/hooks_test.go
new file mode 100644
index 00000000000..b77500c6bf6
--- /dev/null
+++ b/pkg/e2e/hooks_test.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2023 Docker Compose CLI authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestPostStartHookInError(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-post-start-failure"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ })
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/hooks/poststart/compose-error.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 1})
+ assert.Assert(t, strings.Contains(res.Combined(), "test hook exited with status 127"), res.Combined())
+}
+
+func TestPostStartHookSuccess(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-post-start-success"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/poststart/compose-success.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
+
+func TestPreStopHookSuccess(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-pre-stop-success"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/prestop/compose-success.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/prestop/compose-success.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ res = c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/prestop/compose-success.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
+
+func TestPreStopHookInError(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-pre-stop-failure"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/prestop/compose-success.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ })
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/hooks/prestop/compose-error.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ res = c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/hooks/prestop/compose-error.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ res.Assert(t, icmd.Expected{ExitCode: 1})
+ assert.Assert(t, strings.Contains(res.Combined(), "sample hook exited with status 127"))
+}
+
+func TestPreStopHookSuccessWithPreviousStop(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-pre-stop-success-with-previous-stop"
+
+ t.Cleanup(func() {
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/compose.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/compose.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ res = c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/compose.yaml", "--project-name", projectName, "stop", "sample")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
+
+func TestPostStartAndPreStopHook(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "hooks-post-start-and-pre-stop"
+
+ t.Cleanup(func() {
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/compose.yaml", "--project-name", projectName, "down", "-v", "--remove-orphans", "-t", "0")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/hooks/compose.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
diff --git a/pkg/e2e/ipc_test.go b/pkg/e2e/ipc_test.go
index e28bcbc9fee..7a46192a601 100644
--- a/pkg/e2e/ipc_test.go
+++ b/pkg/e2e/ipc_test.go
@@ -25,39 +25,40 @@ import (
)
func TestIPC(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "ipc_e2e"
var cid string
t.Run("create ipc mode container", func(t *testing.T) {
- res := c.RunDockerCmd("run", "-d", "--rm", "--ipc=shareable", "--name", "ipc_mode_container", "alpine", "top")
+ res := c.RunDockerCmd(t, "run", "-d", "--rm", "--ipc=shareable", "--name", "ipc_mode_container", "alpine",
+ "top")
cid = strings.Trim(res.Stdout(), "\n")
})
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/ipc-test/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/ipc-test/compose.yaml", "--project-name", projectName, "up", "-d")
})
t.Run("check running project", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "ps")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "ps")
res.Assert(t, icmd.Expected{Out: `shareable`})
})
t.Run("check ipcmode in container inspect", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", projectName+"-shareable-1")
+ res := c.RunDockerCmd(t, "inspect", projectName+"-shareable-1")
res.Assert(t, icmd.Expected{Out: `"IpcMode": "shareable",`})
- res = c.RunDockerCmd("inspect", projectName+"-service-1")
+ res = c.RunDockerCmd(t, "inspect", projectName+"-service-1")
res.Assert(t, icmd.Expected{Out: `"IpcMode": "container:`})
- res = c.RunDockerCmd("inspect", projectName+"-container-1")
+ res = c.RunDockerCmd(t, "inspect", projectName+"-container-1")
res.Assert(t, icmd.Expected{Out: fmt.Sprintf(`"IpcMode": "container:%s",`, cid)})
})
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
t.Run("remove ipc mode container", func(t *testing.T) {
- _ = c.RunDockerCmd("rm", "-f", "ipc_mode_container")
+ _ = c.RunDockerCmd(t, "rm", "-f", "ipc_mode_container")
})
}
diff --git a/pkg/e2e/logs_test.go b/pkg/e2e/logs_test.go
index a744d89f490..de5c22ffe3d 100644
--- a/pkg/e2e/logs_test.go
+++ b/pkg/e2e/logs_test.go
@@ -17,42 +17,116 @@
package e2e
import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
"strings"
"testing"
+ "time"
"gotest.tools/v3/assert"
+ "gotest.tools/v3/poll"
"gotest.tools/v3/icmd"
)
func TestLocalComposeLogs(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "compose-e2e-logs"
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/logs-test/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/logs-test/compose.yaml", "--project-name", projectName, "up", "-d")
})
t.Run("logs", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "logs")
- res.Assert(t, icmd.Expected{Out: `PING localhost (127.0.0.1)`})
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "logs")
+ res.Assert(t, icmd.Expected{Out: `PING localhost`})
res.Assert(t, icmd.Expected{Out: `hello`})
})
t.Run("logs ping", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "logs", "ping")
- res.Assert(t, icmd.Expected{Out: `PING localhost (127.0.0.1)`})
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "logs", "ping")
+ res.Assert(t, icmd.Expected{Out: `PING localhost`})
assert.Assert(t, !strings.Contains(res.Stdout(), "hello"))
})
t.Run("logs hello", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "logs", "hello", "ping")
- res.Assert(t, icmd.Expected{Out: `PING localhost (127.0.0.1)`})
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "logs", "hello", "ping")
+ res.Assert(t, icmd.Expected{Out: `PING localhost`})
res.Assert(t, icmd.Expected{Out: `hello`})
})
+ t.Run("logs hello index", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "logs", "--index", "2", "hello")
+
+ // docker-compose logs hello
+ // logs-test-hello-2 | hello
+ // logs-test-hello-1 | hello
+ t.Log(res.Stdout())
+ assert.Assert(t, !strings.Contains(res.Stdout(), "hello-1"))
+ assert.Assert(t, strings.Contains(res.Stdout(), "hello-2"))
+ })
+
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+}
+
+func TestLocalComposeLogsFollow(t *testing.T) {
+ c := NewCLI(t, WithEnv("REPEAT=20"))
+ const projectName = "compose-e2e-logs"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/logs-test/compose.yaml", "--project-name", projectName, "up", "-d", "ping")
+
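+	// start following logs while only the "ping" service is running; services and replicas started later should appear in the stream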
+ cmd := c.NewDockerComposeCmd(t, "--project-name", projectName, "logs", "-f")
+ res := icmd.StartCmd(cmd)
+ t.Cleanup(func() {
+ _ = res.Cmd.Process.Kill()
+ })
+
+ poll.WaitOn(t, expectOutput(res, "ping-1 "), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(1*time.Second))
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/logs-test/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ poll.WaitOn(t, expectOutput(res, "hello-1 "), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(1*time.Second))
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/logs-test/compose.yaml", "--project-name", projectName, "up", "-d", "--scale", "ping=2", "ping")
+
+ poll.WaitOn(t, expectOutput(res, "ping-2 "), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(20*time.Second))
+}
+
+func TestLocalComposeLargeLogs(t *testing.T) {
+ const projectName = "compose-e2e-large_logs"
+ file := filepath.Join(t.TempDir(), "large.txt")
+ c := NewCLI(t, WithEnv("FILE="+file))
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
+
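+	// generate a large (300k-line) input file; the cat.yaml fixture is expected to dump it as container logs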
+ f, err := os.Create(file)
+ assert.NilError(t, err)
+ for i := 0; i < 300_000; i++ {
+ _, err := io.WriteString(f, fmt.Sprintf("This is line %d in a laaaarge text file\n", i))
+ assert.NilError(t, err)
+ }
+ assert.NilError(t, f.Close())
+
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/logs-test/cat.yaml", "--project-name", projectName, "up", "--abort-on-container-exit", "--menu=false")
+ cmd.Stdout = io.Discard
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{Out: "test-1 exited with code 0"})
+}
+
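+// expectOutput returns a poll check that succeeds once the command's captured stdout contains the expected substring.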
+func expectOutput(res *icmd.Result, expected string) func(t poll.LogT) poll.Result {
+ return func(t poll.LogT) poll.Result {
+ if strings.Contains(res.Stdout(), expected) {
+ return poll.Success()
+ }
+ return poll.Continue("condition not met")
+ }
}
diff --git a/pkg/e2e/metrics_test.go b/pkg/e2e/metrics_test.go
deleted file mode 100644
index b1a9e667fe2..00000000000
--- a/pkg/e2e/metrics_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package e2e
-
-import (
- "runtime"
- "testing"
-
- "gotest.tools/v3/icmd"
-)
-
-func TestComposeMetrics(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
-
- t.Run("catch specific failure metrics", func(t *testing.T) {
- res := c.RunDockerOrExitError("compose", "-f", "fixtures/does-not-exist/compose.yaml", "build")
- expectedErr := "fixtures/does-not-exist/compose.yaml: no such file or directory"
- if runtime.GOOS == "windows" {
- expectedErr = "does-not-exist\\compose.yaml: The system cannot find the path specified"
- }
- res.Assert(t, icmd.Expected{ExitCode: 14, Err: expectedErr})
- res = c.RunDockerOrExitError("compose", "-f", "fixtures/wrong-composefile/compose.yaml", "up", "-d")
- res.Assert(t, icmd.Expected{ExitCode: 15, Err: "services.simple Additional property wrongField is not allowed"})
- res = c.RunDockerOrExitError("compose", "up")
- res.Assert(t, icmd.Expected{ExitCode: 14, Err: "no configuration file provided: not found"})
- res = c.RunDockerOrExitError("compose", "up", "-f", "fixtures/wrong-composefile/compose.yaml")
- res.Assert(t, icmd.Expected{ExitCode: 16, Err: "unknown shorthand flag: 'f' in -f"})
- res = c.RunDockerOrExitError("compose", "up", "--file", "fixtures/wrong-composefile/compose.yaml")
- res.Assert(t, icmd.Expected{ExitCode: 16, Err: "unknown flag: --file"})
- res = c.RunDockerOrExitError("compose", "donw", "--file", "fixtures/wrong-composefile/compose.yaml")
- res.Assert(t, icmd.Expected{ExitCode: 16, Err: `unknown docker command: "compose donw"`})
- res = c.RunDockerOrExitError("compose", "--file", "fixtures/wrong-composefile/build-error.yml", "build")
- res.Assert(t, icmd.Expected{ExitCode: 17, Err: `line 17: unknown instruction: WRONG`})
- res = c.RunDockerOrExitError("compose", "--file", "fixtures/wrong-composefile/build-error.yml", "up")
- res.Assert(t, icmd.Expected{ExitCode: 17, Err: `line 17: unknown instruction: WRONG`})
- res = c.RunDockerOrExitError("compose", "--file", "fixtures/wrong-composefile/unknown-image.yml", "pull")
- res.Assert(t, icmd.Expected{ExitCode: 18, Err: `pull access denied for unknownimage, repository does not exist or may require 'docker login'`})
- res = c.RunDockerOrExitError("compose", "--file", "fixtures/wrong-composefile/unknown-image.yml", "up")
- res.Assert(t, icmd.Expected{ExitCode: 18, Err: `pull access denied for unknownimage, repository does not exist or may require 'docker login'`})
- })
-}
diff --git a/pkg/e2e/start_fail_test.go b/pkg/e2e/model_test.go
similarity index 60%
rename from pkg/e2e/start_fail_test.go
rename to pkg/e2e/model_test.go
index dfc8b143f21..f30d7c5bb0c 100644
--- a/pkg/e2e/start_fail_test.go
+++ b/pkg/e2e/model_test.go
@@ -18,16 +18,12 @@ package e2e
import (
"testing"
-
- "gotest.tools/v3/icmd"
)
-func TestStartFail(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
- const projectName = "e2e-start-fail"
-
- res := c.RunDockerOrExitError("compose", "-f", "fixtures/start-fail/compose.yaml", "--project-name", projectName, "up", "-d")
- res.Assert(t, icmd.Expected{ExitCode: 1, Err: `container for service "fail" is unhealthy`})
+func TestComposeModel(t *testing.T) {
+ t.Skip("waiting for docker-model release")
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "model-test")
- c.RunDockerComposeCmd("--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/model/compose.yaml", "run", "test", "sh", "-c", "curl ${FOO_URL}")
}
diff --git a/pkg/e2e/networks_test.go b/pkg/e2e/networks_test.go
index 36582dfc7c3..c9b882b9111 100644
--- a/pkg/e2e/networks_test.go
+++ b/pkg/e2e/networks_test.go
@@ -17,6 +17,7 @@
package e2e
import (
+ "fmt"
"net/http"
"strings"
"testing"
@@ -27,107 +28,195 @@ import (
)
func TestNetworks(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ // fixture is shared with TestNetworkModes and is not safe to run concurrently
+ const projectName = "network-e2e"
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME="+projectName,
+ "COMPOSE_FILE=./fixtures/network-test/compose.yaml",
+ ))
- const projectName = "network_e2e"
+ c.RunDockerComposeCmd(t, "down", "-t0", "-v")
- t.Run("ensure we do not reuse previous networks", func(t *testing.T) {
- c.RunDockerOrExitError("network", "rm", projectName+"_dbnet")
- c.RunDockerOrExitError("network", "rm", "microservices")
- })
-
- t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/network-test/compose.yaml", "--project-name", projectName, "up", "-d")
- })
+ c.RunDockerComposeCmd(t, "up", "-d")
- t.Run("check running project", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-p", projectName, "ps")
- res.Assert(t, icmd.Expected{Out: `web`})
+ res := c.RunDockerComposeCmd(t, "ps")
+ res.Assert(t, icmd.Expected{Out: `web`})
- endpoint := "http://localhost"
- output := HTTPGetWithRetry(t, endpoint+"/words/noun", http.StatusOK, 2*time.Second, 20*time.Second)
- assert.Assert(t, strings.Contains(output, `"word":`))
+ endpoint := "http://localhost"
+ output := HTTPGetWithRetry(t, endpoint+"/words/noun", http.StatusOK, 2*time.Second, 20*time.Second)
+ assert.Assert(t, strings.Contains(output, `"word":`))
- res = c.RunDockerCmd("network", "ls")
- res.Assert(t, icmd.Expected{Out: projectName + "_dbnet"})
- res.Assert(t, icmd.Expected{Out: "microservices"})
- })
-
- t.Run("port", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "port", "words", "8080")
- res.Assert(t, icmd.Expected{Out: `0.0.0.0:8080`})
- })
+ res = c.RunDockerCmd(t, "network", "ls")
+ res.Assert(t, icmd.Expected{Out: projectName + "_dbnet"})
+ res.Assert(t, icmd.Expected{Out: "microservices"})
- t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
- })
+ res = c.RunDockerComposeCmd(t, "port", "words", "8080")
+ res.Assert(t, icmd.Expected{Out: `0.0.0.0:8080`})
- t.Run("check networks after down", func(t *testing.T) {
- res := c.RunDockerCmd("network", "ls")
- assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
- assert.Assert(t, !strings.Contains(res.Combined(), "microservices"), res.Combined())
- })
+ c.RunDockerComposeCmd(t, "down", "-t0", "-v")
+ res = c.RunDockerCmd(t, "network", "ls")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "microservices"), res.Combined())
}
-func TestNetworkAliassesAndLinks(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+func TestNetworkAliases(t *testing.T) {
+ c := NewParallelCLI(t)
const projectName = "network_alias_e2e"
+ defer c.cleanupWithDown(t, projectName)
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName, "up",
+ "-d")
})
t.Run("curl alias", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName, "exec", "-T", "container1", "curl", "http://alias-of-container2")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName,
+ "exec", "-T", "container1", "curl", "http://alias-of-container2/")
assert.Assert(t, strings.Contains(res.Stdout(), "Welcome to nginx!"), res.Stdout())
})
t.Run("curl links", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName, "exec", "-T", "container1", "curl", "http://container")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-alias/compose.yaml", "--project-name", projectName,
+ "exec", "-T", "container1", "curl", "http://container/")
+ assert.Assert(t, strings.Contains(res.Stdout(), "Welcome to nginx!"), res.Stdout())
+ })
+
+ t.Run("down", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+}
+
+func TestNetworkLinks(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ const projectName = "network_link_e2e"
+
+ t.Run("up", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/network-links/compose.yaml", "--project-name", projectName, "up",
+ "-d")
+ })
+
+ t.Run("curl links in default bridge network", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-links/compose.yaml", "--project-name", projectName,
+ "exec", "-T", "container2", "curl", "http://container1/")
assert.Assert(t, strings.Contains(res.Stdout(), "Welcome to nginx!"), res.Stdout())
})
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
}
func TestIPAMConfig(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "ipam_e2e"
t.Run("ensure we do not reuse previous networks", func(t *testing.T) {
- c.RunDockerOrExitError("network", "rm", projectName+"_default")
+ c.RunDockerOrExitError(t, "network", "rm", projectName+"_default")
})
t.Run("up", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/ipam/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/ipam/compose.yaml", "--project-name", projectName, "up", "-d")
})
t.Run("ensure service get fixed IP assigned", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", projectName+"-foo-1", "-f", "{{ .NetworkSettings.Networks."+projectName+"_default.IPAddress }}")
+ res := c.RunDockerCmd(t, "inspect", projectName+"-foo-1", "-f",
+ fmt.Sprintf(`{{ $network := index .NetworkSettings.Networks "%s_default" }}{{ $network.IPAMConfig.IPv4Address }}`, projectName))
res.Assert(t, icmd.Expected{Out: "10.1.0.100"})
})
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
}
func TestNetworkModes(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ // fixture is shared with TestNetworks and is not safe to run concurrently
+ c := NewCLI(t)
const projectName = "network_mode_service_run"
+ defer c.cleanupWithDown(t, projectName)
t.Run("run with service mode dependency", func(t *testing.T) {
- res := c.RunDockerOrExitError("compose", "-f", "./fixtures/network-test/compose.yaml", "--project-name", projectName, "run", "-T", "mydb", "echo", "success")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-test/compose.yaml", "--project-name", projectName, "run", "-T", "mydb", "echo", "success")
res.Assert(t, icmd.Expected{Out: "success"})
+ })
+}
+
+func TestNetworkConfigChanged(t *testing.T) {
+ t.Skip("unstable")
+ // fixture is shared with TestNetworks and is not safe to run concurrently
+ c := NewCLI(t)
+ const projectName = "network_config_change"
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/network-test/compose.subnet.yaml", "--project-name", projectName, "up", "-d")
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
- t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "exec", "test", "hostname", "-i")
+ res.Assert(t, icmd.Expected{Out: "172.99.0."})
+ res.Combined()
+
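+	// bring the project up again with a different subnet; the test container should then get an address from the new range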
+ cmd := c.NewCmdWithEnv([]string{"SUBNET=192.168.0.0/16"},
+ "docker", "compose", "-f", "./fixtures/network-test/compose.subnet.yaml", "--project-name", projectName, "up", "-d")
+ res = icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Success)
+ out := res.Combined()
+ fmt.Println(out)
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "exec", "test", "hostname", "-i")
+ res.Assert(t, icmd.Expected{Out: "192.168.0."})
+}
+
+func TestMacAddress(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "network_mac_address"
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/network-test/mac_address.yaml", "--project-name", projectName, "up", "-d")
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
})
+ res := c.RunDockerCmd(t, "inspect", fmt.Sprintf("%s-test-1", projectName), "-f", "{{ (index .NetworkSettings.Networks \"network_mac_address_default\" ).MacAddress }}")
+ res.Assert(t, icmd.Expected{Out: "00:e0:84:35:d0:e8"})
+}
+
+func TestInterfaceName(t *testing.T) {
+ c := NewCLI(t)
+
+ version := c.RunDockerCmd(t, "version", "-f", "{{.Server.Version}}")
+ major, _, found := strings.Cut(version.Combined(), ".")
+ assert.Assert(t, found)
+ if major == "26" || major == "27" {
+ t.Skip("Skipping test due to docker version < 28")
+ }
+
+ const projectName = "network_interface_name"
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-interface-name/compose.yaml", "--project-name", projectName, "run", "test")
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+ res.Assert(t, icmd.Expected{Out: "foobar@"})
+}
+
+func TestNetworkRecreate(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "network_recreate"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/network-recreate/compose.yaml", "--project-name", projectName, "up", "-d")
+
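+	// run up again with FOO changed so the network definition differs, forcing Compose to remove and recreate the network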
+ c = NewCLI(t, WithEnv("FOO=bar"))
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-recreate/compose.yaml", "--project-name", projectName, "--progress=plain", "up", "-d")
+ err := res.Stderr()
+ fmt.Println(err)
+ res.Assert(t, icmd.Expected{Err: `
+ Container network_recreate-web-1 Stopped
+ Network network_recreate_test Removed
+ Network network_recreate_test Creating
+ Network network_recreate_test Created
+ Container network_recreate-web-1 Starting
+ Container network_recreate-web-1 Started`})
}
diff --git a/pkg/e2e/noDeps_test.go b/pkg/e2e/noDeps_test.go
new file mode 100644
index 00000000000..fad86f2fea6
--- /dev/null
+++ b/pkg/e2e/noDeps_test.go
@@ -0,0 +1,61 @@
+//go:build !windows
+// +build !windows
+
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestNoDepsVolumeFrom(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-no-deps-volume-from"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/no-deps/volume-from.yaml", "--project-name", projectName, "up", "-d")
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/no-deps/volume-from.yaml", "--project-name", projectName, "up", "--no-deps", "-d", "app")
+
+ c.RunDockerCmd(t, "rm", "-f", fmt.Sprintf("%s-db-1", projectName))
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/no-deps/volume-from.yaml", "--project-name", projectName, "up", "--no-deps", "-d", "app")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "cannot share volume with service db: container missing"})
+}
+
+func TestNoDepsNetworkMode(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-no-deps-network-mode"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/no-deps/network-mode.yaml", "--project-name", projectName, "up", "-d")
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/no-deps/network-mode.yaml", "--project-name", projectName, "up", "--no-deps", "-d", "app")
+
+ c.RunDockerCmd(t, "rm", "-f", fmt.Sprintf("%s-db-1", projectName))
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/no-deps/network-mode.yaml", "--project-name", projectName, "up", "--no-deps", "-d", "app")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "cannot share network namespace with service db: container missing"})
+}
diff --git a/pkg/e2e/orphans_test.go b/pkg/e2e/orphans_test.go
new file mode 100644
index 00000000000..e721e7a540b
--- /dev/null
+++ b/pkg/e2e/orphans_test.go
@@ -0,0 +1,40 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestRemoveOrphans(t *testing.T) {
+ c := NewCLI(t)
+
+ const projectName = "compose-e2e-orphans"
+ defer c.cleanupWithDown(t, projectName)
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/orphans/compose.yaml", "-p", projectName, "run", "orphan")
+ res := c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--all")
+ assert.Check(t, strings.Contains(res.Combined(), "compose-e2e-orphans-orphan-run-"))
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/orphans/compose.yaml", "-p", projectName, "up", "-d")
+
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--all")
+ assert.Check(t, !strings.Contains(res.Combined(), "compose-e2e-orphans-orphan-run-"))
+}
diff --git a/pkg/e2e/pause_test.go b/pkg/e2e/pause_test.go
new file mode 100644
index 00000000000..200301a2415
--- /dev/null
+++ b/pkg/e2e/pause_test.go
@@ -0,0 +1,160 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/icmd"
+)
+
+func TestPause(t *testing.T) {
+ if _, ok := os.LookupEnv("CI"); ok {
+ t.Skip("Skipping test on CI... flaky")
+ }
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-pause",
+ "COMPOSE_FILE=./fixtures/pause/compose.yaml"))
+
+ cleanup := func() {
+ cli.RunDockerComposeCmd(t, "down", "-v", "--remove-orphans", "-t", "0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ // launch both services and verify that they are accessible
+ cli.RunDockerComposeCmd(t, "up", "-d")
+ urls := map[string]string{
+ "a": urlForService(t, cli, "a", 80),
+ "b": urlForService(t, cli, "b", 80),
+ }
+ for _, url := range urls {
+ HTTPGetWithRetry(t, url, http.StatusOK, 50*time.Millisecond, 20*time.Second)
+ }
+
+ // pause a and verify that it can no longer be hit but b still can
+ cli.RunDockerComposeCmd(t, "pause", "a")
+ httpClient := http.Client{Timeout: 250 * time.Millisecond}
+ resp, err := httpClient.Get(urls["a"])
+ if resp != nil {
+ _ = resp.Body.Close()
+ }
+ require.Error(t, err, "a should no longer respond")
+ var netErr net.Error
+ errors.As(err, &netErr)
+ require.True(t, netErr.Timeout(), "Error should have indicated a timeout")
+ HTTPGetWithRetry(t, urls["b"], http.StatusOK, 50*time.Millisecond, 5*time.Second)
+
+ // unpause a and verify that both containers work again
+ cli.RunDockerComposeCmd(t, "unpause", "a")
+ for _, url := range urls {
+ HTTPGetWithRetry(t, url, http.StatusOK, 50*time.Millisecond, 5*time.Second)
+ }
+}
+
+func TestPauseServiceNotRunning(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-pause-svc-not-running",
+ "COMPOSE_FILE=./fixtures/pause/compose.yaml"))
+
+ cleanup := func() {
+ cli.RunDockerComposeCmd(t, "down", "-v", "--remove-orphans", "-t", "0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ // attempt to pause service "a" even though the project was never started
+ res := cli.RunDockerComposeCmdNoCheck(t, "pause", "a")
+
+ // TODO: `docker pause` errors in this case, should Compose be consistent?
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
+
+func TestPauseServiceAlreadyPaused(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-pause-svc-already-paused",
+ "COMPOSE_FILE=./fixtures/pause/compose.yaml"))
+
+ cleanup := func() {
+ cli.RunDockerComposeCmd(t, "down", "-v", "--remove-orphans", "-t", "0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ // launch a and wait for it to come up
+ cli.RunDockerComposeCmd(t, "up", "--menu=false", "--wait", "a")
+ HTTPGetWithRetry(t, urlForService(t, cli, "a", 80), http.StatusOK, 50*time.Millisecond, 10*time.Second)
+
+ // pause a twice: the first attempt should succeed, the second should fail
+ cli.RunDockerComposeCmd(t, "pause", "a")
+ res := cli.RunDockerComposeCmdNoCheck(t, "pause", "a")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "already paused"})
+}
+
+func TestPauseServiceDoesNotExist(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-pause-svc-not-exist",
+ "COMPOSE_FILE=./fixtures/pause/compose.yaml"))
+
+ cleanup := func() {
+ cli.RunDockerComposeCmd(t, "down", "-v", "--remove-orphans", "-t", "0")
+ }
+ cleanup()
+ t.Cleanup(cleanup)
+
+ // attempt to pause a service that is not defined in the Compose file
+ res := cli.RunDockerComposeCmdNoCheck(t, "pause", "does_not_exist")
+ // TODO: `compose down does_not_exist` and similar commands report an error; this should too
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+}
+
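+// urlForService returns a localhost URL for the host port the service publishes for targetPort.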
+func urlForService(t testing.TB, cli *CLI, service string, targetPort int) string {
+ t.Helper()
+ return fmt.Sprintf(
+ "http://localhost:%d",
+ publishedPortForService(t, cli, service, targetPort),
+ )
+}
+
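+// publishedPortForService parses the JSON output of `compose ps` and returns the host port published for the given target port.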
+func publishedPortForService(t testing.TB, cli *CLI, service string, targetPort int) int {
+ t.Helper()
+ res := cli.RunDockerComposeCmd(t, "ps", "--format=json", service)
+ var svc struct {
+ Publishers []struct {
+ TargetPort int
+ PublishedPort int
+ }
+ }
+ require.NoError(t, json.Unmarshal([]byte(res.Stdout()), &svc),
+ "Failed to parse `%s` output", res.Cmd.String())
+ for _, pp := range svc.Publishers {
+ if pp.TargetPort == targetPort {
+ return pp.PublishedPort
+ }
+ }
+ require.Failf(t, "No published port for target port",
+ "Target port: %d\nService: %s", targetPort, res.Combined())
+ return -1
+}
diff --git a/pkg/e2e/profiles_test.go b/pkg/e2e/profiles_test.go
new file mode 100644
index 00000000000..dffc209d00e
--- /dev/null
+++ b/pkg/e2e/profiles_test.go
@@ -0,0 +1,207 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+const (
+ profiledService = "profiled-service"
+ regularService = "regular-service"
+)
+
+func TestExplicitProfileUsage(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-explicit-profiles"
+ const profileName = "test-profile"
+
+ t.Run("compose up with profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "--profile", profileName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("compose stop with profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "--profile", profileName, "stop")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("compose start with profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "--profile", profileName, "start")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("compose restart with profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "--profile", profileName, "restart")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("down", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ t.Run("check containers after down", func(t *testing.T) {
+ res := c.RunDockerCmd(t, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+ })
+}
+
+func TestNoProfileUsage(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-no-profiles"
+
+ t.Run("compose up without profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("compose stop without profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "stop")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("compose start without profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "start")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("compose restart without profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "restart")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("down", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ t.Run("check containers after down", func(t *testing.T) {
+ res := c.RunDockerCmd(t, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+ })
+}
+
+func TestActiveProfileViaTargetedService(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-via-target-service-profiles"
+ const profileName = "test-profile"
+
+ t.Run("compose up with service name", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "up", profiledService, "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ res.Assert(t, icmd.Expected{Out: profiledService})
+
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "--profile", profileName, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("compose stop with service name", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "stop", profiledService)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ assert.Assert(t, !strings.Contains(res.Combined(), profiledService))
+ })
+
+ t.Run("compose start with service name", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "start", profiledService)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("compose restart with service name", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "-p", projectName, "restart")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), regularService))
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+
+ t.Run("down", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ t.Run("check containers after down", func(t *testing.T) {
+ res := c.RunDockerCmd(t, "ps")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+ })
+}
+
+func TestDotEnvProfileUsage(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-dotenv-profiles"
+ const profileName = "test-profile"
+
+ t.Cleanup(func() {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ t.Run("compose up with profile", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/compose.yaml",
+ "--env-file", "./fixtures/profiles/test-profile.env",
+ "-p", projectName, "--profile", profileName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ res = c.RunDockerComposeCmd(t, "-p", projectName, "ps")
+ res.Assert(t, icmd.Expected{Out: regularService})
+ res.Assert(t, icmd.Expected{Out: profiledService})
+ })
+}
diff --git a/pkg/e2e/providers_test.go b/pkg/e2e/providers_test.go
new file mode 100644
index 00000000000..b026f1f1434
--- /dev/null
+++ b/pkg/e2e/providers_test.go
@@ -0,0 +1,64 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestDependsOnMultipleProviders(t *testing.T) {
+ provider, err := findExecutable("example-provider")
+ assert.NilError(t, err)
+
+ path := fmt.Sprintf("%s%s%s", os.Getenv("PATH"), string(os.PathListSeparator), filepath.Dir(provider))
+ c := NewParallelCLI(t, WithEnv("PATH="+path))
+ const projectName = "depends-on-multiple-providers"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/providers/depends-on-multiple-providers.yaml", "--project-name", projectName, "up")
+ res.Assert(t, icmd.Success)
+ env := getEnv(res.Combined(), false)
+ assert.Check(t, slices.Contains(env, "PROVIDER1_URL=https://magic.cloud/provider1"), env)
+ assert.Check(t, slices.Contains(env, "PROVIDER2_URL=https://magic.cloud/provider2"), env)
+}
+
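+// getEnv collects KEY=VALUE lines from command output, stripping the container log prefix when run is false; the result is sorted.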
+func getEnv(out string, run bool) []string {
+ var env []string
+ scanner := bufio.NewScanner(strings.NewReader(out))
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !run && strings.HasPrefix(line, "test-1 | ") {
+ env = append(env, line[10:])
+ }
+ if run && strings.Contains(line, "=") && len(strings.Split(line, "=")) == 2 {
+ env = append(env, line)
+ }
+ }
+ slices.Sort(env)
+ return env
+}
diff --git a/pkg/e2e/ps_test.go b/pkg/e2e/ps_test.go
new file mode 100644
index 00000000000..77a49accb75
--- /dev/null
+++ b/pkg/e2e/ps_test.go
@@ -0,0 +1,132 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "encoding/json"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/icmd"
+
+ "github.com/docker/compose/v5/pkg/api"
+)
+
+func TestPs(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-ps"
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "up", "-d")
+ require.NoError(t, res.Error)
+ t.Cleanup(func() {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ assert.Contains(t, res.Combined(), "Container e2e-ps-busybox-1 Started", res.Combined())
+
+ t.Run("table", func(t *testing.T) {
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps")
+ lines := strings.Split(res.Stdout(), "\n")
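+ // expect 4 lines: header, two service rows, and a trailing empty line from the final newline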
+ assert.Len(t, lines, 4)
+ count := 0
+ for _, line := range lines[1:3] {
+ if strings.Contains(line, "e2e-ps-busybox-1") {
+ assert.Contains(t, line, "127.0.0.1:8001->8000/tcp")
+ count++
+ }
+ if strings.Contains(line, "e2e-ps-nginx-1") {
+ assert.Contains(t, line, "80/tcp, 443/tcp, 8080/tcp")
+ count++
+ }
+ }
+ assert.Equal(t, 2, count, "Did not match both services:\n"+res.Combined())
+ })
+
+ t.Run("json", func(t *testing.T) {
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps",
+ "--format", "json")
+ type element struct {
+ Name string
+ Project string
+ Publishers api.PortPublishers
+ }
+ var output []element
+ out := res.Stdout()
+ dec := json.NewDecoder(strings.NewReader(out))
+ for dec.More() {
+ var s element
+ require.NoError(t, dec.Decode(&s), "Failed to unmarshal ps JSON output")
+ output = append(output, s)
+ }
+
+ count := 0
+ assert.Len(t, output, 2)
+ for _, service := range output {
+ assert.Equal(t, projectName, service.Project)
+ publishers := service.Publishers
+ if service.Name == "e2e-ps-busybox-1" {
+ assert.Len(t, publishers, 1)
+ assert.Equal(t, api.PortPublishers{
+ {
+ URL: "127.0.0.1",
+ TargetPort: 8000,
+ PublishedPort: 8001,
+ Protocol: "tcp",
+ },
+ }, publishers)
+ count++
+ }
+ if service.Name == "e2e-ps-nginx-1" {
+ assert.Len(t, publishers, 3)
+ assert.Equal(t, api.PortPublishers{
+ {TargetPort: 80, Protocol: "tcp"},
+ {TargetPort: 443, Protocol: "tcp"},
+ {TargetPort: 8080, Protocol: "tcp"},
+ }, publishers)
+
+ count++
+ }
+ }
+ assert.Equal(t, 2, count, "Did not match both services:\n"+res.Combined())
+ })
+
+ t.Run("ps --all", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "stop")
+ require.NoError(t, res.Error)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps")
+ lines := strings.Split(res.Stdout(), "\n")
+ assert.Len(t, lines, 2)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps", "--all")
+ lines = strings.Split(res.Stdout(), "\n")
+ assert.Len(t, lines, 4)
+ })
+
+ t.Run("ps unknown", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "stop")
+ require.NoError(t, res.Error)
+
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps", "nginx")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/ps-test/compose.yaml", "--project-name", projectName, "ps", "unknown")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "no such service: unknown"})
+ })
+}
diff --git a/pkg/e2e/publish_test.go b/pkg/e2e/publish_test.go
new file mode 100644
index 00000000000..7e901552072
--- /dev/null
+++ b/pkg/e2e/publish_test.go
@@ -0,0 +1,214 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestPublishChecks(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-explicit-profiles"
+
+ t.Run("publish error environment", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/publish/compose-environment.yml",
+ "-p", projectName, "publish", "test/test")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: `service "serviceA" has environment variable(s) declared.
+To avoid leaking sensitive data,`})
+ })
+
+ t.Run("publish error env_file", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/publish/compose-env-file.yml",
+ "-p", projectName, "publish", "test/test")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: `service "serviceA" has env_file declared.
+service "serviceA" has environment variable(s) declared.
+To avoid leaking sensitive data,`})
+ })
+
+ t.Run("publish multiple errors env_file and environment", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/publish/compose-multi-env-config.yml",
+ "-p", projectName, "publish", "test/test")
+ // we don't know in which order the services will be loaded, so we can't predict the order of the error messages
+ assert.Assert(t, strings.Contains(res.Combined(), `service "serviceB" has env_file declared.`), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), `service "serviceB" has environment variable(s) declared.`), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), `service "serviceA" has environment variable(s) declared.`), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), `To avoid leaking sensitive data, you must either explicitly allow the sending of environment variables by using the --with-env flag,
+or remove sensitive data from your Compose configuration
+`), res.Combined())
+ })
+
+ t.Run("publish success environment", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/publish/compose-environment.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "-y", "--dry-run")
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test publishing"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("publish success env_file", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/publish/compose-env-file.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "-y", "--dry-run")
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test publishing"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("publish approve validation message", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-env-file.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "--dry-run")
+ cmd.Stdin = strings.NewReader("y\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ assert.Assert(t, strings.Contains(res.Combined(), "Are you ok to publish these environment variables?"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test publishing"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("publish refuse validation message", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-env-file.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "--dry-run")
+ cmd.Stdin = strings.NewReader("n\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ assert.Assert(t, strings.Contains(res.Combined(), "Are you ok to publish these environment variables?"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "test/test publishing"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("publish with extends", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/publish/compose-with-extends.yml",
+ "-p", projectName, "publish", "test/test", "--dry-run")
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("publish list env variables", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-multi-env-config.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "--dry-run")
+ cmd.Stdin = strings.NewReader("n\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ out := res.Combined()
+ assert.Assert(t, strings.Contains(out, `you are about to publish environment variables within your OCI artifact.
+please double check that you are not leaking sensitive data`), out)
+ assert.Assert(t, strings.Contains(out, `Service/Config serviceA
+FOO=bar`), out)
+ assert.Assert(t, strings.Contains(out, `Service/Config serviceB`), out)
+ // we don't know in which order the env variables will be loaded
+ assert.Assert(t, strings.Contains(out, `FOO=bar`), out)
+ assert.Assert(t, strings.Contains(out, `BAR=baz`), out)
+ assert.Assert(t, strings.Contains(out, `QUIX=`), out)
+ })
+
+ t.Run("refuse to publish with bind mount", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-bind-mount.yml",
+ "-p", projectName, "publish", "test/test", "--dry-run")
+ cmd.Stdin = strings.NewReader("n\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ out := res.Combined()
+ assert.Assert(t, strings.Contains(out, "you are about to publish bind mounts declaration within your OCI artifact."), out)
+ assert.Assert(t, strings.Contains(out, "e2e/fixtures/publish:/user-data"), out)
+ assert.Assert(t, strings.Contains(out, "Are you ok to publish these bind mount declarations?"), out)
+ assert.Assert(t, !strings.Contains(out, "serviceA published"), out)
+ })
+
+ t.Run("publish with bind mount", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-bind-mount.yml",
+ "-p", projectName, "publish", "test/test", "--dry-run")
+ cmd.Stdin = strings.NewReader("y\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ assert.Assert(t, strings.Contains(res.Combined(), "you are about to publish bind mounts declaration within your OCI artifact."), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Are you ok to publish these bind mount declarations?"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e/fixtures/publish:/user-data"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "test/test published"), res.Combined())
+ })
+
+ t.Run("refuse to publish with build section only", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/publish/compose-build-only.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "-y", "--dry-run")
+ res.Assert(t, icmd.Expected{ExitCode: 1})
+ assert.Assert(t, strings.Contains(res.Combined(), "your Compose stack cannot be published as it only contains a build section for service(s):"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "serviceA"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "serviceB"), res.Combined())
+ })
+
+ t.Run("refuse to publish with local include", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "./fixtures/publish/compose-local-include.yml",
+ "-p", projectName, "publish", "test/test", "--dry-run")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "cannot publish compose file with local includes"})
+ })
+
+ t.Run("detect sensitive data", func(t *testing.T) {
+ cmd := c.NewDockerComposeCmd(t, "-f", "./fixtures/publish/compose-sensitive.yml",
+ "-p", projectName, "publish", "test/test", "--with-env", "--dry-run")
+ cmd.Stdin = strings.NewReader("n\n")
+ res := icmd.RunCmd(cmd)
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ output := res.Combined()
+ assert.Assert(t, strings.Contains(output, "you are about to publish sensitive data within your OCI artifact.\n"), output)
+ assert.Assert(t, strings.Contains(output, "please double check that you are not leaking sensitive data"), output)
+ assert.Assert(t, strings.Contains(output, "AWS Client ID\n\"services.serviceA.environment.AWS_ACCESS_KEY_ID\": A3TX1234567890ABCDEF"), output)
+ assert.Assert(t, strings.Contains(output, "AWS Secret Key\n\"services.serviceA.environment.AWS_SECRET_ACCESS_KEY\": aws\"12345+67890/abcdefghijklm+NOPQRSTUVWXYZ+\""), output)
+ assert.Assert(t, strings.Contains(output, "Github authentication\n\"GITHUB_TOKEN\": ghp_1234567890abcdefghijklmnopqrstuvwxyz"), output)
+ assert.Assert(t, strings.Contains(output, "JSON Web Token\n\"\": eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."+
+ "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw"), output)
+ assert.Assert(t, strings.Contains(output, "Private Key\n\"\": -----BEGIN DSA PRIVATE KEY-----\nwxyz+ABC=\n-----END DSA PRIVATE KEY-----"), output)
+ })
+}
+
+func TestPublish(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-publish"
+ const registryName = projectName + "-registry"
+ c.RunDockerCmd(t, "run", "--name", registryName, "-P", "-d", "registry:3")
+ port := c.RunDockerCmd(t, "inspect", "--format", `{{ (index (index .NetworkSettings.Ports "5000/tcp") 0).HostPort }}`, registryName).Stdout()
+ registry := "localhost:" + strings.TrimSpace(port)
+ t.Cleanup(func() {
+ c.RunDockerCmd(t, "rm", "--force", registryName)
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/publish/oci/compose.yaml", "-f", "./fixtures/publish/oci/compose-override.yaml",
+ "-p", projectName, "publish", "--with-env", "--yes", "--insecure-registry", registry+"/test:test")
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+
+ // docker exec -it compose-e2e-publish-registry tree /var/lib/registry/docker/registry/v2/
+
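+	// read the published artifact back from the local registry via an oci:// reference and check the rendered config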
+ cmd := c.NewDockerComposeCmd(t, "--verbose", "--project-name=oci",
+ "--insecure-registry", registry,
+ "-f", fmt.Sprintf("oci://%s/test:test", registry), "config")
+ res = icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "XDG_CACHE_HOME="+t.TempDir())
+ })
+ res.Assert(t, icmd.Expected{ExitCode: 0})
+ assert.Equal(t, res.Stdout(), `name: oci
+services:
+ app:
+ environment:
+ HELLO: WORLD
+ image: alpine
+ networks:
+ default: null
+networks:
+ default:
+ name: oci_default
+`)
+}
diff --git a/pkg/e2e/pull_test.go b/pkg/e2e/pull_test.go
new file mode 100644
index 00000000000..799bdbb2fc7
--- /dev/null
+++ b/pkg/e2e/pull_test.go
@@ -0,0 +1,76 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestComposePull(t *testing.T) {
+ c := NewParallelCLI(t)
+
+ t.Run("Verify image pulled", func(t *testing.T) {
+ // cleanup existing images
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/simple", "down", "--rmi", "all")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/simple", "pull")
+ output := res.Combined()
+
+ assert.Assert(t, strings.Contains(output, "Image alpine:3.14 Pulled"))
+ assert.Assert(t, strings.Contains(output, "Image alpine:3.15 Pulled"))
+
+ // verify default policy is 'always' for pull command
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/simple", "pull")
+ output = res.Combined()
+
+ assert.Assert(t, strings.Contains(output, "Image alpine:3.14 Pulled"))
+ assert.Assert(t, strings.Contains(output, "Image alpine:3.15 Pulled"))
+ })
+
+ t.Run("Verify skipped pull if image is already present locally", func(t *testing.T) {
+ // make sure the required image is present
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/image-present-locally", "pull")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/image-present-locally", "pull")
+ output := res.Combined()
+
+ assert.Assert(t, strings.Contains(output, "alpine:3.13.12 Skipped Image is already present locally"))
+ // an image with the :latest tag is pulled regardless of pull_policy: missing or if_not_present
+ assert.Assert(t, strings.Contains(output, "alpine:latest Pulled"))
+ })
+
+ t.Run("Verify skipped no image to be pulled", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/no-image-name-given", "pull")
+ output := res.Combined()
+
+ assert.Assert(t, strings.Contains(output, "Skipped No image to be pulled"))
+ })
+
+ t.Run("Verify pull failure", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/compose-pull/unknown-image", "pull")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "pull access denied for does_not_exists"})
+ })
+
+ t.Run("Verify ignore pull failure", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/compose-pull/unknown-image", "pull", "--ignore-pull-failures")
+ res.Assert(t, icmd.Expected{Err: "Some service image(s) must be built from source by running:"})
+ })
+}
diff --git a/pkg/e2e/recreate_no_deps_test.go b/pkg/e2e/recreate_no_deps_test.go
new file mode 100644
index 00000000000..2b32e0d5bc3
--- /dev/null
+++ b/pkg/e2e/recreate_no_deps_test.go
@@ -0,0 +1,39 @@
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestRecreateWithNoDeps(t *testing.T) {
+ c := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=recreate-no-deps",
+ ))
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/dependencies/recreate-no-deps.yaml", "up", "-d")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/dependencies/recreate-no-deps.yaml", "up", "-d", "--force-recreate", "--no-deps", "my-service")
+ res.Assert(t, icmd.Success)
+
+ RequireServiceState(t, c, "my-service", "running")
+
+ c.RunDockerComposeCmd(t, "down")
+}
diff --git a/pkg/e2e/restart_test.go b/pkg/e2e/restart_test.go
index 9b2e2ccf4d4..8b81e228a18 100644
--- a/pkg/e2e/restart_test.go
+++ b/pkg/e2e/restart_test.go
@@ -26,39 +26,90 @@ import (
"gotest.tools/v3/assert"
)
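+// assertServiceStatus asserts that the `compose ps` output lists container <project>-<service>-1 for the service with the given status.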
+func assertServiceStatus(t *testing.T, projectName, service, status string, ps string) {
+ // match output with random spaces like:
+ // e2e-start-stop-db-1 alpine:latest "echo hello" db 1 minutes ago Exited (0) 1 minutes ago
+ regx := fmt.Sprintf("%s-%s-1.+%s\\s+.+%s.+", projectName, service, service, status)
+ testify.Regexp(t, regx, ps)
+}
+
func TestRestart(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "e2e-restart"
- getServiceRegx := func(service string, status string) string {
- // match output with random spaces like:
- // e2e-start-stop-db-1 "echo hello" db running
- return fmt.Sprintf("%s-%s-1.+%s\\s+%s", projectName, service, service, status)
- }
-
t.Run("Up a project", func(t *testing.T) {
// This is just to ensure the containers do NOT exist
- c.RunDockerOrExitError("compose", "--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
- res := c.RunDockerOrExitError("compose", "-f", "./fixtures/restart-test/compose.yaml", "--project-name", projectName, "up", "-d")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-restart-restart-1 Started"), res.Combined())
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/restart-test/compose.yaml", "--project-name", projectName, "up", "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-restart-restart-1 Started"), res.Combined())
- c.WaitForCmdResult(c.NewDockerCmd("compose", "--project-name", projectName, "ps", "-a", "--format", "json"),
- StdoutContains(`"State":"exited"`),
- 10*time.Second, 1*time.Second)
+ c.WaitForCmdResult(t, c.NewDockerComposeCmd(t, "--project-name", projectName, "ps", "-a", "--format",
+ "json"),
+ StdoutContains(`"State":"exited"`), 10*time.Second, 1*time.Second)
- res = c.RunDockerOrExitError("compose", "--project-name", projectName, "ps", "-a")
- testify.Regexp(t, getServiceRegx("restart", "exited"), res.Stdout())
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "-a")
+ assertServiceStatus(t, projectName, "restart", "Exited", res.Stdout())
- _ = c.RunDockerOrExitError("compose", "-f", "./fixtures/restart-test/compose.yaml", "--project-name", projectName, "restart")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/restart-test/compose.yaml", "--project-name", projectName, "restart")
// Give the same time but it must NOT exit
time.Sleep(time.Second)
- res = c.RunDockerOrExitError("compose", "--project-name", projectName, "ps")
- testify.Regexp(t, getServiceRegx("restart", "running"), res.Stdout())
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps")
+ assertServiceStatus(t, projectName, "restart", "Up", res.Stdout())
// Clean up
- c.RunDockerOrExitError("compose", "--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+}
+
+func TestRestartWithDependencies(t *testing.T) {
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-restart-deps",
+ ))
+ baseService := "nginx"
+ depWithRestart := "with-restart"
+ depNoRestart := "no-restart"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "down", "--remove-orphans")
})
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/restart-test/compose-depends-on.yaml", "up", "-d")
+
+ res := c.RunDockerComposeCmd(t, "restart", baseService)
+ out := res.Combined()
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Restarting", baseService)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Healthy", baseService)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Started", depWithRestart)), out)
+ assert.Assert(t, !strings.Contains(out, depNoRestart), out)
+
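+	// Re-running up with a different LABEL value (presumably interpolated by the fixture) forces the base service to be
+	// recreated; dependents configured to restart on dependency updates should restart, the other one should keep running.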
+ c = NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-restart-deps",
+ "LABEL=recreate",
+ ))
+ res = c.RunDockerComposeCmd(t, "-f", "./fixtures/restart-test/compose-depends-on.yaml", "up", "-d")
+ out = res.Combined()
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Stopped", depWithRestart)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Recreated", baseService)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Healthy", baseService)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Started", depWithRestart)), out)
+ assert.Assert(t, strings.Contains(out, fmt.Sprintf("Container e2e-restart-deps-%s-1 Running", depNoRestart)), out)
+}
+
+func TestRestartWithProfiles(t *testing.T) {
+ c := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-restart-profiles",
+ ))
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "down", "--remove-orphans")
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/restart-test/compose.yaml", "--profile", "test", "up", "-d")
+
+ res := c.RunDockerComposeCmd(t, "restart", "test")
+	t.Log(res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-restart-profiles-test-1 Started"), res.Combined())
}
diff --git a/pkg/e2e/scale_test.go b/pkg/e2e/scale_test.go
new file mode 100644
index 00000000000..7a859187bae
--- /dev/null
+++ b/pkg/e2e/scale_test.go
@@ -0,0 +1,217 @@
+/*
+Copyright 2020 Docker Compose CLI authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package e2e
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ testify "github.com/stretchr/testify/assert"
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
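+// NO_STATE_TO_CHECK is passed to checkServiceContainer when only the number of matching containers matters, not their state.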
+const NO_STATE_TO_CHECK = ""
+
+func TestScaleBasicCases(t *testing.T) {
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=scale-basic-tests"))
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "down", "--rmi", "all")
+ }
+ t.Cleanup(reset)
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d")
+ res.Assert(t, icmd.Success)
+
+ t.Log("scale up one service")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "dbadmin=2")
+ out := res.Combined()
+ checkServiceContainer(t, out, "scale-basic-tests-dbadmin", "Started", 2)
+
+ t.Log("scale up 2 services")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "front=3", "back=2")
+ out = res.Combined()
+ checkServiceContainer(t, out, "scale-basic-tests-front", "Running", 2)
+ checkServiceContainer(t, out, "scale-basic-tests-front", "Started", 1)
+ checkServiceContainer(t, out, "scale-basic-tests-back", "Running", 1)
+ checkServiceContainer(t, out, "scale-basic-tests-back", "Started", 1)
+
+ t.Log("scale down one service")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "dbadmin=1")
+ out = res.Combined()
+ checkServiceContainer(t, out, "scale-basic-tests-dbadmin", "Running", 1)
+
+ t.Log("scale to 0 a service")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "dbadmin=0")
+ assert.Check(t, res.Stdout() == "", res.Stdout())
+
+ t.Log("scale down 2 services")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "front=2", "back=1")
+ out = res.Combined()
+ checkServiceContainer(t, out, "scale-basic-tests-front", "Running", 2)
+ assert.Check(t, !strings.Contains(out, "Container scale-basic-tests-front-3 Running"), res.Combined())
+ checkServiceContainer(t, out, "scale-basic-tests-back", "Running", 1)
+}
+
+func TestScaleWithDepsCases(t *testing.T) {
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=scale-deps-tests"))
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "down", "--rmi", "all")
+ }
+ t.Cleanup(reset)
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=2")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps")
+ checkServiceContainer(t, res.Combined(), "scale-deps-tests-db", NO_STATE_TO_CHECK, 2)
+
+ t.Log("scale up 1 service with --no-deps")
+ _ = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "--no-deps", "back=2")
+ res = c.RunDockerComposeCmd(t, "ps")
+ checkServiceContainer(t, res.Combined(), "scale-deps-tests-back", NO_STATE_TO_CHECK, 2)
+ checkServiceContainer(t, res.Combined(), "scale-deps-tests-db", NO_STATE_TO_CHECK, 2)
+
+ t.Log("scale up 1 service without --no-deps")
+ _ = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "scale", "back=2")
+ res = c.RunDockerComposeCmd(t, "ps")
+ checkServiceContainer(t, res.Combined(), "scale-deps-tests-back", NO_STATE_TO_CHECK, 2)
+ checkServiceContainer(t, res.Combined(), "scale-deps-tests-db", NO_STATE_TO_CHECK, 1)
+}
+
+func TestScaleUpAndDownPreserveContainerNumber(t *testing.T) {
+ const projectName = "scale-up-down-test"
+
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME="+projectName))
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "down", "--rmi", "all")
+ }
+ t.Cleanup(reset)
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=2", "db")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1\n"+projectName+"-db-2")
+
+ t.Log("scale down removes replica #2")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=1", "db")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1")
+
+ t.Log("scale up restores replica #2")
+ res = c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=2", "db")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1\n"+projectName+"-db-2")
+}
+
+func TestScaleDownRemovesObsolete(t *testing.T) {
+ const projectName = "scale-down-obsolete-test"
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME="+projectName))
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "down", "--rmi", "all")
+ }
+ t.Cleanup(reset)
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "db")
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1")
+
+ cmd := c.NewDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=2", "db")
+ res = icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "MAYBE=value")
+ })
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1\n"+projectName+"-db-2")
+
+ t.Log("scale down removes obsolete replica #1")
+ cmd = c.NewDockerComposeCmd(t, "--project-directory", "fixtures/scale", "up", "-d", "--scale", "db=1", "db")
+ res = icmd.RunCmd(cmd, func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "MAYBE=value")
+ })
+ res.Assert(t, icmd.Success)
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "db")
+ res.Assert(t, icmd.Success)
+ assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1")
+}
+
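+// checkServiceContainer fails the test unless stdout contains exactly count lines mentioning both containerName and containerState.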
+func checkServiceContainer(t *testing.T, stdout, containerName, containerState string, count int) {
+ found := 0
+ lines := strings.Split(stdout, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, containerName) && strings.Contains(line, containerState) {
+ found++
+ }
+ }
+ if found == count {
+ return
+ }
+	errMessage := fmt.Sprintf("expected %d but found %d instance(s) of container %s in stdout", count, found, containerName)
+ if containerState != "" {
+ errMessage += fmt.Sprintf(" with expected state %s", containerState)
+ }
+ testify.Fail(t, errMessage, stdout)
+}
+
+func TestScaleDownNoRecreate(t *testing.T) {
+ const projectName = "scale-down-recreated-test"
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME="+projectName))
+
+ reset := func() {
+ c.RunDockerComposeCmd(t, "down", "--rmi", "all")
+ }
+ t.Cleanup(reset)
+ c.RunDockerComposeCmd(t, "-f", "fixtures/scale/build.yaml", "build", "--build-arg", "FOO=test")
+ c.RunDockerComposeCmd(t, "-f", "fixtures/scale/build.yaml", "up", "-d", "--scale", "test=2")
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/scale/build.yaml", "build", "--build-arg", "FOO=updated")
+ c.RunDockerComposeCmd(t, "-f", "fixtures/scale/build.yaml", "up", "-d", "--scale", "test=4", "--no-recreate")
+
+ res := c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "test")
+ res.Assert(t, icmd.Success)
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-1"))
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-2"))
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-3"))
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-4"))
+
+ t.Log("scale down removes obsolete replica #1 and #2")
+	c.RunDockerComposeCmd(t, "-f", "fixtures/scale/build.yaml", "up", "-d", "--scale", "test=2")
+
+ res = c.RunDockerComposeCmd(t, "ps", "--format", "{{.Name}}", "test")
+ res.Assert(t, icmd.Success)
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-3"))
+ assert.Check(t, strings.Contains(res.Stdout(), "scale-down-recreated-test-test-4"))
+}
diff --git a/pkg/e2e/scan_message_test.go b/pkg/e2e/scan_message_test.go
deleted file mode 100644
index 74a4681f6ef..00000000000
--- a/pkg/e2e/scan_message_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package e2e
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "github.com/docker/compose/v2/pkg/utils"
-
- "gotest.tools/v3/assert"
- "gotest.tools/v3/icmd"
-)
-
-func TestDisplayScanMessageAfterBuild(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
-
- // assert docker scan plugin is available
- c.RunDockerOrExitError("scan", "--help")
-
- t.Run("display on compose build", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test-compose-build", "build")
- defer c.RunDockerOrExitError("rmi", "-f", "scan-msg-test-compose-build_nginx")
- res.Assert(t, icmd.Expected{Err: utils.ScanSuggestMsg})
- })
-
- t.Run("do not display on compose build with quiet flag", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test-quiet", "build", "--quiet")
- assert.Assert(t, !strings.Contains(res.Combined(), "docker scan"), res.Combined())
- res = c.RunDockerCmd("rmi", "-f", "scan-msg-test-quiet_nginx")
- assert.Assert(t, !strings.Contains(res.Combined(), "No such image"))
-
- res = c.RunDockerComposeCmd("-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test-q", "build", "-q")
- defer c.RunDockerOrExitError("rmi", "-f", "scan-msg-test-q_nginx")
- assert.Assert(t, !strings.Contains(res.Combined(), "docker scan"), res.Combined())
- })
-
- _ = c.RunDockerOrExitError("rmi", "scan-msg-test_nginx")
-
- t.Run("display on compose up if image is built", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test", "up", "-d")
- defer c.RunDockerOrExitError("compose", "-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test", "down")
- res.Assert(t, icmd.Expected{Err: utils.ScanSuggestMsg})
- })
-
- t.Run("do not display on compose up if no image built", func(t *testing.T) { // re-run the same Compose aproject
- res := c.RunDockerComposeCmd("-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test", "up", "-d")
- defer c.RunDockerOrExitError("compose", "-f", "fixtures/simple-build-test/compose.yaml", "-p", "scan-msg-test", "down", "--rmi", "all")
- assert.Assert(t, !strings.Contains(res.Combined(), "docker scan"), res.Combined())
- })
-
- t.Run("do not display if scan already invoked", func(t *testing.T) {
- _ = os.MkdirAll(filepath.Join(c.ConfigDir, "scan"), 0755)
- scanConfigFile := filepath.Join(c.ConfigDir, "scan", "config.json")
- err := ioutil.WriteFile(scanConfigFile, []byte(`{"optin":true}`), 0644)
- assert.NilError(t, err)
-
- res := c.RunDockerCmd("build", "-t", "test-image-scan-msg", "fixtures/simple-build-test/nginx-build")
- assert.Assert(t, !strings.Contains(res.Combined(), "docker scan"), res.Combined())
- })
-}
diff --git a/pkg/e2e/secrets_test.go b/pkg/e2e/secrets_test.go
new file mode 100644
index 00000000000..3e3895112a3
--- /dev/null
+++ b/pkg/e2e/secrets_test.go
@@ -0,0 +1,53 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "testing"
+
+ "gotest.tools/v3/icmd"
+)
+
+func TestSecretFromEnv(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "env-secret")
+
+ t.Run("compose run", func(t *testing.T) {
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/env-secret/compose.yaml", "run", "foo"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "SECRET=BAR")
+ })
+ res.Assert(t, icmd.Expected{Out: "BAR"})
+ })
+ t.Run("secret uid", func(t *testing.T) {
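+		// the fixture is assumed to declare the secret with uid/gid 1005 and mode 0440, which should be reflected by ls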
+ res := icmd.RunCmd(c.NewDockerComposeCmd(t, "-f", "./fixtures/env-secret/compose.yaml", "run", "foo", "ls", "-al", "/var/run/secrets/bar"),
+ func(cmd *icmd.Cmd) {
+ cmd.Env = append(cmd.Env, "SECRET=BAR")
+ })
+ res.Assert(t, icmd.Expected{Out: "-r--r----- 1 1005 1005"})
+ })
+}
+
+func TestSecretFromInclude(t *testing.T) {
+ c := NewParallelCLI(t)
+ defer c.cleanupWithDown(t, "env-secret-include")
+
+ t.Run("compose run", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/env-secret/compose.yaml", "run", "included")
+ res.Assert(t, icmd.Expected{Out: "this-is-secret"})
+ })
+}
diff --git a/pkg/e2e/start_stop_test.go b/pkg/e2e/start_stop_test.go
index 39dfdff336c..19f07b71960 100644
--- a/pkg/e2e/start_stop_test.go
+++ b/pkg/e2e/start_stop_test.go
@@ -23,10 +23,11 @@ import (
testify "github.com/stretchr/testify/assert"
"gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
)
func TestStartStop(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "e2e-start-stop-no-dependencies"
getProjectRegx := func(status string) string {
@@ -35,103 +36,249 @@ func TestStartStop(t *testing.T) {
return fmt.Sprintf("%s\\s+%s\\(%d\\)", projectName, status, 2)
}
- getServiceRegx := func(service string, status string) string {
- // match output with random spaces like:
- // e2e-start-stop-db-1 "echo hello" db running
- return fmt.Sprintf("%s-%s-1.+%s\\s+%s", projectName, service, service, status)
- }
-
t.Run("Up a project", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "up", "-d")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-no-dependencies-simple-1 Started"), res.Combined())
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "up",
+ "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-no-dependencies-simple-1 Started"), res.Combined())
- res = c.RunDockerComposeCmd("ls", "--all")
+ res = c.RunDockerComposeCmd(t, "ls", "--all")
testify.Regexp(t, getProjectRegx("running"), res.Stdout())
- res = c.RunDockerComposeCmd("--project-name", projectName, "ps")
- testify.Regexp(t, getServiceRegx("simple", "running"), res.Stdout())
- testify.Regexp(t, getServiceRegx("another", "running"), res.Stdout())
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps")
+ assertServiceStatus(t, projectName, "simple", "Up", res.Stdout())
+ assertServiceStatus(t, projectName, "another", "Up", res.Stdout())
})
t.Run("stop project", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "stop")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "stop")
- res := c.RunDockerComposeCmd("ls")
+ res := c.RunDockerComposeCmd(t, "ls")
assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-no-dependencies"), res.Combined())
- res = c.RunDockerComposeCmd("ls", "--all")
+ res = c.RunDockerComposeCmd(t, "ls", "--all")
testify.Regexp(t, getProjectRegx("exited"), res.Stdout())
- res = c.RunDockerComposeCmd("--project-name", projectName, "ps")
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps")
assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-no-dependencies-words-1"), res.Combined())
- res = c.RunDockerComposeCmd("--project-name", projectName, "ps", "--all")
- testify.Regexp(t, getServiceRegx("simple", "exited"), res.Stdout())
- testify.Regexp(t, getServiceRegx("another", "exited"), res.Stdout())
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "--all")
+ assertServiceStatus(t, projectName, "simple", "Exited", res.Stdout())
+ assertServiceStatus(t, projectName, "another", "Exited", res.Stdout())
})
t.Run("start project", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "start")
-
- res := c.RunDockerComposeCmd("ls")
- testify.Regexp(t, getProjectRegx("running"), res.Stdout())
- })
-
- t.Run("pause project", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "pause")
-
- res := c.RunDockerComposeCmd("ls", "--all")
- testify.Regexp(t, getProjectRegx("paused"), res.Stdout())
- })
-
- t.Run("unpause project", func(t *testing.T) {
- c.RunDockerComposeCmd("-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "unpause")
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/compose.yaml", "--project-name", projectName, "start")
- res := c.RunDockerComposeCmd("ls")
+ res := c.RunDockerComposeCmd(t, "ls")
testify.Regexp(t, getProjectRegx("running"), res.Stdout())
})
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
})
}
func TestStartStopWithDependencies(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "e2e-start-stop-with-dependencies"
- defer c.RunDockerComposeCmd("--project-name", projectName, "rm", "-fsv")
+ defer c.RunDockerComposeCmd(t, "--project-name", projectName, "rm", "-fsv")
t.Run("Up", func(t *testing.T) {
- res := c.RunDockerComposeCmd("-f", "./fixtures/dependencies/compose.yaml", "--project-name", projectName, "up", "-d")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Started"), res.Combined())
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Started"), res.Combined())
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "--project-name", projectName,
+ "up", "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Started"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Started"), res.Combined())
})
t.Run("stop foo", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "stop", "foo")
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "stop", "foo")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Stopped"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Stopped"), res.Combined())
- res = c.RunDockerComposeCmd("--project-name", projectName, "ps", "--status", "running")
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "--status", "running")
assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-dependencies-bar-1"), res.Combined())
assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-with-dependencies-foo-1"), res.Combined())
})
t.Run("start foo", func(t *testing.T) {
- res := c.RunDockerComposeCmd("--project-name", projectName, "stop")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Stopped"), res.Combined())
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "stop")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Stopped"), res.Combined())
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "start", "foo")
+ out := res.Combined()
+ assert.Assert(t, strings.Contains(out, "Container e2e-start-stop-with-dependencies-bar-1 Started"), out)
+ assert.Assert(t, strings.Contains(out, "Container e2e-start-stop-with-dependencies-foo-1 Started"), out)
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "--status", "running")
+ out = res.Combined()
+ assert.Assert(t, strings.Contains(out, "e2e-start-stop-with-dependencies-bar-1"), out)
+ assert.Assert(t, strings.Contains(out, "e2e-start-stop-with-dependencies-foo-1"), out)
+ })
- res = c.RunDockerComposeCmd("--project-name", projectName, "start", "foo")
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Started"), res.Combined())
- assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Started"), res.Combined())
+ t.Run("Up no-deps links", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/links/compose.yaml", "--project-name", projectName, "up",
+ "--no-deps", "-d", "foo")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-foo-1 Started"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "Container e2e-start-stop-with-dependencies-bar-1 Started"), res.Combined())
+ })
- res = c.RunDockerComposeCmd("--project-name", projectName, "ps", "--status", "running")
- assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-dependencies-bar-1"), res.Combined())
- assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-dependencies-foo-1"), res.Combined())
+ t.Run("down", func(t *testing.T) {
+ _ = c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+}
+
+func TestStartStopWithOneOffs(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-start-stop-with-oneoffs"
+
+ t.Run("Up", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "--project-name", projectName,
+ "up", "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-oneoffs-foo-1 Started"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-with-oneoffs-bar-1 Started"), res.Combined())
+ })
+
+ t.Run("run one-off", func(t *testing.T) {
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "--project-name", projectName, "run", "-d", "bar", "sleep", "infinity")
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "-a")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-foo-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-run"), res.Combined())
+ })
+
+ t.Run("stop (not one-off containers)", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "stop")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-foo-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-1"), res.Combined())
+		assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-run"), res.Combined())
+
+ res = c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "-a", "--status", "running")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-run"), res.Combined())
+ })
+
+ t.Run("start (not one-off containers)", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "start")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-foo-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-1"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-run"), res.Combined())
+ })
+
+ t.Run("restart (not one-off containers)", func(t *testing.T) {
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "restart")
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-foo-1"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-1"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar-run"), res.Combined())
})
t.Run("down", func(t *testing.T) {
- _ = c.RunDockerComposeCmd("--project-name", projectName, "down")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
+
+ res := c.RunDockerComposeCmd(t, "--project-name", projectName, "ps", "-a", "--status", "running")
+ assert.Assert(t, !strings.Contains(res.Combined(), "e2e-start-stop-with-oneoffs-bar"), res.Combined())
})
}
+
+func TestStartAlreadyRunning(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-start-stop-svc-already-running",
+ "COMPOSE_FILE=./fixtures/start-stop/compose.yaml"))
+ t.Cleanup(func() {
+ cli.RunDockerComposeCmd(t, "down", "--remove-orphans", "-v", "-t", "0")
+ })
+
+ cli.RunDockerComposeCmd(t, "up", "-d", "--wait")
+
+ res := cli.RunDockerComposeCmd(t, "start", "simple")
+ assert.Equal(t, res.Stdout(), "", "No output should have been written to stdout")
+}
+
+func TestStopAlreadyStopped(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-start-stop-svc-already-stopped",
+ "COMPOSE_FILE=./fixtures/start-stop/compose.yaml"))
+ t.Cleanup(func() {
+ cli.RunDockerComposeCmd(t, "down", "--remove-orphans", "-v", "-t", "0")
+ })
+
+ cli.RunDockerComposeCmd(t, "up", "-d", "--wait")
+
+ // stop the container
+ cli.RunDockerComposeCmd(t, "stop", "simple")
+
+ // attempt to stop it again
+ res := cli.RunDockerComposeCmdNoCheck(t, "stop", "simple")
+ // TODO: for consistency, this should NOT write any output because the
+ // container is already stopped
+ res.Assert(t, icmd.Expected{
+ ExitCode: 0,
+ Err: "Container e2e-start-stop-svc-already-stopped-simple-1 Stopped",
+ })
+}
+
+func TestStartStopMultipleServices(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-start-stop-svc-multiple",
+ "COMPOSE_FILE=./fixtures/start-stop/compose.yaml"))
+ t.Cleanup(func() {
+ cli.RunDockerComposeCmd(t, "down", "--remove-orphans", "-v", "-t", "0")
+ })
+
+ cli.RunDockerComposeCmd(t, "up", "-d", "--wait")
+
+ res := cli.RunDockerComposeCmd(t, "stop", "simple", "another")
+ services := []string{"simple", "another"}
+ for _, svc := range services {
+ stopMsg := fmt.Sprintf("Container e2e-start-stop-svc-multiple-%s-1 Stopped", svc)
+ assert.Assert(t, strings.Contains(res.Stderr(), stopMsg),
+ fmt.Sprintf("Missing stop message for %s\n%s", svc, res.Combined()))
+ }
+
+ res = cli.RunDockerComposeCmd(t, "start", "simple", "another")
+ for _, svc := range services {
+ startMsg := fmt.Sprintf("Container e2e-start-stop-svc-multiple-%s-1 Started", svc)
+ assert.Assert(t, strings.Contains(res.Stderr(), startMsg),
+ fmt.Sprintf("Missing start message for %s\n%s", svc, res.Combined()))
+ }
+}
+
+func TestStartSingleServiceAndDependency(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=e2e-start-single-deps",
+ "COMPOSE_FILE=./fixtures/start-stop/start-stop-deps.yaml"))
+ t.Cleanup(func() {
+ cli.RunDockerComposeCmd(t, "down", "--remove-orphans", "-v", "-t", "0")
+ })
+
+ cli.RunDockerComposeCmd(t, "create", "desired")
+
+ res := cli.RunDockerComposeCmd(t, "start", "desired")
+ desiredServices := []string{"desired", "dep_1", "dep_2"}
+ for _, s := range desiredServices {
+ startMsg := fmt.Sprintf("Container e2e-start-single-deps-%s-1 Started", s)
+ assert.Assert(t, strings.Contains(res.Combined(), startMsg),
+ fmt.Sprintf("Missing start message for service: %s\n%s", s, res.Combined()))
+ }
+ undesiredServices := []string{"another", "another_2"}
+ for _, s := range undesiredServices {
+ assert.Assert(t, !strings.Contains(res.Combined(), s),
+ fmt.Sprintf("Shouldn't have message for service: %s\n%s", s, res.Combined()))
+ }
+}
+
+func TestStartStopMultipleFiles(t *testing.T) {
+ cli := NewParallelCLI(t, WithEnv("COMPOSE_PROJECT_NAME=e2e-start-stop-svc-multiple-files"))
+ t.Cleanup(func() {
+ cli.RunDockerComposeCmd(t, "-p", "e2e-start-stop-svc-multiple-files", "down", "--remove-orphans")
+ })
+
+ cli.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/compose.yaml", "up", "-d")
+ cli.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/other.yaml", "up", "-d")
+
+ res := cli.RunDockerComposeCmd(t, "-f", "./fixtures/start-stop/compose.yaml", "stop")
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-svc-multiple-files-simple-1 Stopped"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), "Container e2e-start-stop-svc-multiple-files-another-1 Stopped"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "Container e2e-start-stop-svc-multiple-files-a-different-one-1 Stopped"), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), "Container e2e-start-stop-svc-multiple-files-and-another-one-1 Stopped"), res.Combined())
+}
diff --git a/pkg/e2e/up_test.go b/pkg/e2e/up_test.go
new file mode 100644
index 00000000000..a13ff63da51
--- /dev/null
+++ b/pkg/e2e/up_test.go
@@ -0,0 +1,225 @@
+//go:build !windows
+// +build !windows
+
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os/exec"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+	"github.com/docker/compose/v2/pkg/utils"
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
+)
+
+func TestUpServiceUnhealthy(t *testing.T) {
+ c := NewParallelCLI(t)
+ const projectName = "e2e-start-fail"
+
+ res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start-fail/compose.yaml", "--project-name", projectName, "up", "-d")
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: `container e2e-start-fail-fail-1 is unhealthy`})
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+}
+
+func TestUpDependenciesNotStopped(t *testing.T) {
+ c := NewParallelCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME=up-deps-stop",
+ ))
+
+ reset := func() {
+ c.RunDockerComposeCmdNoCheck(t, "down", "-t=0", "--remove-orphans", "-v")
+ }
+ reset()
+ t.Cleanup(reset)
+
+ t.Log("Launching orphan container (background)")
+ c.RunDockerComposeCmd(t,
+ "-f=./fixtures/ups-deps-stop/orphan.yaml",
+ "up",
+ "--wait",
+ "--detach",
+ "orphan",
+ )
+ RequireServiceState(t, c, "orphan", "running")
+
+ t.Log("Launching app container with implicit dependency")
+ upOut := &utils.SafeBuffer{}
+ testCmd := c.NewDockerComposeCmd(t,
+ "-f=./fixtures/ups-deps-stop/compose.yaml",
+ "up",
+ "--menu=false",
+ "app",
+ )
+
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ t.Cleanup(cancel)
+
+ cmd, err := StartWithNewGroupID(ctx, testCmd, upOut, nil)
+ assert.NilError(t, err, "Failed to run compose up")
+
+ t.Log("Waiting for containers to be in running state")
+ upOut.RequireEventuallyContains(t, "hello app")
+ RequireServiceState(t, c, "app", "running")
+ RequireServiceState(t, c, "dependency", "running")
+
+ t.Log("Simulating Ctrl-C")
+ require.NoError(t, syscall.Kill(-cmd.Process.Pid, syscall.SIGINT),
+ "Failed to send SIGINT to compose up process")
+
+ t.Log("Waiting for `compose up` to exit")
+ err = cmd.Wait()
+ if err != nil {
+		var exitErr *exec.ExitError
+		if !errors.As(err, &exitErr) {
+			t.Fatalf("`compose up` returned an unexpected error type: %v", err)
+		}
+		if exitErr.ExitCode() == -1 {
+ t.Fatalf("`compose up` was killed: %v", err)
+ }
+ require.Equal(t, 130, exitErr.ExitCode())
+ }
+
+ RequireServiceState(t, c, "app", "exited")
+ // dependency should still be running
+ RequireServiceState(t, c, "dependency", "running")
+ RequireServiceState(t, c, "orphan", "running")
+}
+
+func TestUpWithBuildDependencies(t *testing.T) {
+ c := NewParallelCLI(t)
+
+	t.Run("up with service using image built by another service", func(t *testing.T) {
+		// ensure local test run does not reuse a previously built image
+ c.RunDockerOrExitError(t, "rmi", "built-image-dependency")
+
+ res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/dependencies",
+ "-f", "fixtures/dependencies/service-image-depends-on.yaml", "up", "-d")
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/dependencies",
+ "-f", "fixtures/dependencies/service-image-depends-on.yaml", "down", "--rmi", "all")
+ })
+
+ res.Assert(t, icmd.Success)
+ })
+}
+
+func TestUpWithDependencyExit(t *testing.T) {
+ c := NewParallelCLI(t)
+
+	t.Run("up with dependency that exits before becoming healthy", func(t *testing.T) {
+ res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/dependencies",
+ "-f", "fixtures/dependencies/dependency-exit.yaml", "up", "-d")
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", "dependencies", "down")
+ })
+
+ res.Assert(t, icmd.Expected{ExitCode: 1, Err: "dependency failed to start: container dependencies-db-1 exited (1)"})
+ })
+}
+
+func TestScaleDoesntRecreate(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-scale"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "fixtures/simple-composefile/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ res := c.RunDockerComposeCmd(t, "-f", "fixtures/simple-composefile/compose.yaml", "--project-name", projectName, "up", "--scale", "simple=2", "-d")
+ assert.Check(t, !strings.Contains(res.Combined(), "Recreated"))
+}
+
+func TestUpWithDependencyNotRequired(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-dependency-not-required"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+ })
+
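+	// "bar" is presumably marked as a non-required dependency in the fixture, so its failure is only reported
+	// as a warning and "foo" still starts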
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "--project-name", projectName,
+ "--profile", "not-required", "up", "-d")
+ assert.Assert(t, strings.Contains(res.Combined(), "foo"), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), " optional dependency \"bar\" failed to start"), res.Combined())
+}
+
+func TestUpWithAllResources(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-all-resources"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v")
+ })
+
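+	// --all-resources creates declared volumes and networks even if no service in the file references them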
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/resources/compose.yaml", "--all-resources", "--project-name", projectName, "up")
+ assert.Assert(t, strings.Contains(res.Combined(), fmt.Sprintf(`Volume %s_my_vol Created`, projectName)), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), fmt.Sprintf(`Network %s_my_net Created`, projectName)), res.Combined())
+}
+
+func TestUpProfile(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-up-profile"
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "--profile", "test", "down", "-v")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/profiles/docker-compose.yaml", "--project-name", projectName, "up", "foo")
+ assert.Assert(t, strings.Contains(res.Combined(), `Container db_c Created`), res.Combined())
+ assert.Assert(t, strings.Contains(res.Combined(), `Container foo_c Created`), res.Combined())
+ assert.Assert(t, !strings.Contains(res.Combined(), `Container bar_c Created`), res.Combined())
+}
+
+func TestUpImageID(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-up-image-id"
+
+ digest := strings.TrimSpace(c.RunDockerCmd(t, "image", "inspect", "alpine", "-f", "{{ .ID }}").Stdout())
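+	// image IDs are reported as "sha256:<hex>"; keep only the hex part for the ID variable handed to the fixture below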
+ _, id, _ := strings.Cut(digest, ":")
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v")
+ })
+
+ c = NewCLI(t, WithEnv(fmt.Sprintf("ID=%s", id)))
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/simple-composefile/id.yaml", "--project-name", projectName, "up")
+}
+
+func TestUpStopWithLogsMixed(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-stop-logs"
+
+ t.Cleanup(func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "-v")
+ })
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/stop/compose.yaml", "--project-name", projectName, "up", "--abort-on-container-exit")
+ // assert we still get service2 logs after service 1 Stopped event
+ res.Assert(t, icmd.Expected{
+ Err: "Container compose-e2e-stop-logs-service1-1 Stopped",
+ })
+ // assert we get stop hook logs
+ res.Assert(t, icmd.Expected{Out: "service2-1 -> | stop hook running...\nservice2-1 | 64 bytes"})
+}
diff --git a/pkg/e2e/volumes_test.go b/pkg/e2e/volumes_test.go
index 5717449021a..d018ed699d6 100644
--- a/pkg/e2e/volumes_test.go
+++ b/pkg/e2e/volumes_test.go
@@ -17,25 +17,31 @@
package e2e
import (
+ "fmt"
"net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+	"strconv"
"strings"
"testing"
"time"
"gotest.tools/v3/assert"
+ "gotest.tools/v3/icmd"
)
func TestLocalComposeVolume(t *testing.T) {
- c := NewParallelE2eCLI(t, binDir)
+ c := NewParallelCLI(t)
const projectName = "compose-e2e-volume"
t.Run("up with build and no image name, volume", func(t *testing.T) {
// ensure local test run does not reuse previously build image
- c.RunDockerOrExitError("rmi", "compose-e2e-volume_nginx")
- c.RunDockerOrExitError("volume", "rm", projectName+"_staticVol")
- c.RunDockerOrExitError("volume", "rm", "myvolume")
- c.RunDockerComposeCmd("--project-directory", "fixtures/volume-test", "--project-name", projectName, "up", "-d")
+ c.RunDockerOrExitError(t, "rmi", "compose-e2e-volume-nginx")
+ c.RunDockerOrExitError(t, "volume", "rm", projectName+"-staticVol")
+ c.RunDockerOrExitError(t, "volume", "rm", "myvolume")
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/volume-test", "--project-name", projectName, "up",
+ "-d")
})
t.Run("access bind mount data", func(t *testing.T) {
@@ -44,47 +50,146 @@ func TestLocalComposeVolume(t *testing.T) {
})
t.Run("check container volume specs", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", "compose-e2e-volume-nginx2-1", "--format", "{{ json .Mounts }}")
+ res := c.RunDockerCmd(t, "inspect", "compose-e2e-volume-nginx2-1", "--format", "{{ json .Mounts }}")
output := res.Stdout()
- // nolint
assert.Assert(t, strings.Contains(output, `"Destination":"/usr/src/app/node_modules","Driver":"local","Mode":"z","RW":true,"Propagation":""`), output)
assert.Assert(t, strings.Contains(output, `"Destination":"/myconfig","Mode":"","RW":false,"Propagation":"rprivate"`), output)
})
t.Run("check config content", func(t *testing.T) {
- output := c.RunDockerCmd("exec", "compose-e2e-volume-nginx2-1", "cat", "/myconfig").Stdout()
+ output := c.RunDockerCmd(t, "exec", "compose-e2e-volume-nginx2-1", "cat", "/myconfig").Stdout()
assert.Assert(t, strings.Contains(output, `Hello from Nginx container`), output)
})
t.Run("check secrets content", func(t *testing.T) {
- output := c.RunDockerCmd("exec", "compose-e2e-volume-nginx2-1", "cat", "/run/secrets/mysecret").Stdout()
+ output := c.RunDockerCmd(t, "exec", "compose-e2e-volume-nginx2-1", "cat", "/run/secrets/mysecret").Stdout()
assert.Assert(t, strings.Contains(output, `Hello from Nginx container`), output)
})
t.Run("check container bind-mounts specs", func(t *testing.T) {
- res := c.RunDockerCmd("inspect", "compose-e2e-volume-nginx-1", "--format", "{{ json .Mounts }}")
+ res := c.RunDockerCmd(t, "inspect", "compose-e2e-volume-nginx-1", "--format", "{{ json .Mounts }}")
output := res.Stdout()
- // nolint
assert.Assert(t, strings.Contains(output, `"Type":"bind"`))
assert.Assert(t, strings.Contains(output, `"Destination":"/usr/share/nginx/html"`))
})
t.Run("should inherit anonymous volumes", func(t *testing.T) {
- c.RunDockerOrExitError("exec", "compose-e2e-volume-nginx2-1", "touch", "/usr/src/app/node_modules/test")
- c.RunDockerOrExitError("compose", "--project-directory", "fixtures/volume-test", "--project-name", projectName, "up", "--force-recreate", "-d")
- c.RunDockerOrExitError("exec", "compose-e2e-volume-nginx2-1", "ls", "/usr/src/app/node_modules/test")
+ c.RunDockerOrExitError(t, "exec", "compose-e2e-volume-nginx2-1", "touch", "/usr/src/app/node_modules/test")
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/volume-test", "--project-name", projectName, "up", "--force-recreate", "-d")
+ c.RunDockerOrExitError(t, "exec", "compose-e2e-volume-nginx2-1", "ls", "/usr/src/app/node_modules/test")
})
t.Run("should renew anonymous volumes", func(t *testing.T) {
- c.RunDockerOrExitError("exec", "compose-e2e-volume-nginx2-1", "touch", "/usr/src/app/node_modules/test")
- c.RunDockerOrExitError("compose", "--project-directory", "fixtures/volume-test", "--project-name", projectName, "up", "--force-recreate", "--renew-anon-volumes", "-d")
- c.RunDockerOrExitError("exec", "compose-e2e-volume-nginx2-1", "ls", "/usr/src/app/node_modules/test")
+ c.RunDockerOrExitError(t, "exec", "compose-e2e-volume-nginx2-1", "touch", "/usr/src/app/node_modules/test")
+ c.RunDockerComposeCmd(t, "--project-directory", "fixtures/volume-test", "--project-name", projectName, "up", "--force-recreate", "--renew-anon-volumes", "-d")
+ c.RunDockerOrExitError(t, "exec", "compose-e2e-volume-nginx2-1", "ls", "/usr/src/app/node_modules/test")
})
t.Run("cleanup volume project", func(t *testing.T) {
- c.RunDockerComposeCmd("--project-name", projectName, "down", "--volumes")
- ls := c.RunDockerCmd("volume", "ls").Stdout()
- assert.Assert(t, !strings.Contains(ls, projectName+"_staticVol"))
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--volumes")
+ ls := c.RunDockerCmd(t, "volume", "ls").Stdout()
+ assert.Assert(t, !strings.Contains(ls, projectName+"-staticVol"))
assert.Assert(t, !strings.Contains(ls, "myvolume"))
})
}
+
+func TestProjectVolumeBind(t *testing.T) {
+ if composeStandaloneMode {
+ t.Skip()
+ }
+ c := NewParallelCLI(t)
+ const projectName = "compose-e2e-project-volume-bind"
+
+ t.Run("up on project volume with bind specification", func(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Running on Windows. Skipping...")
+ }
+ tmpDir, err := os.MkdirTemp("", projectName)
+ assert.NilError(t, err)
+ defer os.RemoveAll(tmpDir) //nolint
+
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+
+ c.RunDockerOrExitError(t, "volume", "rm", "-f", projectName+"_project-data").Assert(t, icmd.Success)
+ cmd := c.NewCmdWithEnv([]string{"TEST_DIR=" + tmpDir},
+ "docker", "compose", "--project-directory", "fixtures/project-volume-bind-test", "--project-name", projectName, "up", "-d")
+ icmd.RunCmd(cmd).Assert(t, icmd.Success)
+ defer c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
+
+ c.RunCmd(t, "sh", "-c", "echo SUCCESS > "+filepath.Join(tmpDir, "resultfile")).Assert(t, icmd.Success)
+
+ ret := c.RunDockerOrExitError(t, "exec", "frontend", "bash", "-c", "cat /data/resultfile").Assert(t, icmd.Success)
+ assert.Assert(t, strings.Contains(ret.Stdout(), "SUCCESS"))
+ })
+}
+
+func TestUpSwitchVolumes(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-switch-volumes"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ c.RunDockerCmd(t, "volume", "rm", "-f", "test_external_volume")
+ c.RunDockerCmd(t, "volume", "rm", "-f", "test_external_volume_2")
+ })
+
+ c.RunDockerCmd(t, "volume", "create", "test_external_volume")
+ c.RunDockerCmd(t, "volume", "create", "test_external_volume_2")
+
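+	// compose.yaml mounts test_external_volume; compose2.yaml switches to test_external_volume_2,
+	// so the second up should recreate the container with the new volume attached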
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/switch-volumes/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ res := c.RunDockerCmd(t, "inspect", fmt.Sprintf("%s-app-1", projectName), "-f", "{{ (index .Mounts 0).Name }}")
+ res.Assert(t, icmd.Expected{Out: "test_external_volume"})
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/switch-volumes/compose2.yaml", "--project-name", projectName, "up", "-d")
+ res = c.RunDockerCmd(t, "inspect", fmt.Sprintf("%s-app-1", projectName), "-f", "{{ (index .Mounts 0).Name }}")
+ res.Assert(t, icmd.Expected{Out: "test_external_volume_2"})
+}
+
+func TestUpRecreateVolumes(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-recreate-volumes"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/recreate-volumes/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ res := c.RunDockerCmd(t, "volume", "inspect", fmt.Sprintf("%s_my_vol", projectName), "-f", "{{ index .Labels \"foo\" }}")
+ res.Assert(t, icmd.Expected{Out: "bar"})
+
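+	// compose2.yaml changes the label on my_vol; -y auto-confirms the prompt so compose can recreate the volume with the new configuration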
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/recreate-volumes/compose2.yaml", "--project-name", projectName, "up", "-d", "-y")
+ res = c.RunDockerCmd(t, "volume", "inspect", fmt.Sprintf("%s_my_vol", projectName), "-f", "{{ index .Labels \"foo\" }}")
+ res.Assert(t, icmd.Expected{Out: "zot"})
+}
+
+func TestUpRecreateVolumes_IgnoreBinds(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-recreate-volumes"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/recreate-volumes/bind.yaml", "--project-name", projectName, "up", "-d")
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/recreate-volumes/bind.yaml", "--project-name", projectName, "up", "-d")
+ assert.Check(t, !strings.Contains(res.Combined(), "Recreated"))
+}
+
+func TestImageVolume(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "compose-e2e-image-volume"
+ t.Cleanup(func() {
+ c.cleanupWithDown(t, projectName)
+ })
+
+	version := c.RunDockerCmd(t, "version", "-f", "{{.Server.Version}}")
+	major, _, found := strings.Cut(strings.TrimSpace(version.Combined()), ".")
+	assert.Assert(t, found)
+	if m, err := strconv.Atoi(major); err != nil || m < 28 {
+		t.Skip("Skipping test due to docker version < 28")
+	}
+
+ res := c.RunDockerComposeCmd(t, "-f", "./fixtures/volumes/compose.yaml", "--project-name", projectName, "up", "with_image")
+ out := res.Combined()
+ assert.Check(t, strings.Contains(out, "index.html"))
+}
diff --git a/pkg/e2e/wait_test.go b/pkg/e2e/wait_test.go
new file mode 100644
index 00000000000..37e6903e0d1
--- /dev/null
+++ b/pkg/e2e/wait_test.go
@@ -0,0 +1,107 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ "gotest.tools/v3/icmd"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestWaitOnFaster(t *testing.T) {
+ const projectName = "e2e-wait-faster"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/wait/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "wait", "faster")
+}
+
+func TestWaitOnSlower(t *testing.T) {
+ const projectName = "e2e-wait-slower"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/wait/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "wait", "slower")
+}
+
+func TestWaitOnInfinity(t *testing.T) {
+ const projectName = "e2e-wait-infinity"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/wait/compose.yaml", "--project-name", projectName, "up", "-d")
+
+ cmd := c.NewDockerComposeCmd(t, "--project-name", projectName, "wait", "infinity")
+ r := icmd.StartCmd(cmd)
+ assert.NilError(t, r.Error)
+ t.Cleanup(func() {
+ if r.Cmd.Process != nil {
+ _ = r.Cmd.Process.Kill()
+ }
+ })
+
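+	// Wait for the command in the background and race it against a 7s ticker: if
+	// the ticker fires first, `wait infinity` is still blocked, which is the
+	// expected behaviour for a service that never exits.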
+ finished := make(chan struct{})
+ ticker := time.NewTicker(7 * time.Second)
+ go func() {
+ _ = r.Cmd.Wait()
+ finished <- struct{}{}
+ }()
+
+ select {
+ case <-finished:
+ t.Fatal("wait infinity should not finish")
+ case <-ticker.C:
+ }
+}
+
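+// TestWaitAndDrop checks the --down-project flag: once the awaited service
+// exits, `wait` should tear the whole project down, so no container carrying
+// the project name remains in `docker ps --all`.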
+func TestWaitAndDrop(t *testing.T) {
+ const projectName = "e2e-wait-and-drop"
+ c := NewParallelCLI(t)
+
+ cleanup := func() {
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--timeout=0", "--remove-orphans")
+ }
+ t.Cleanup(cleanup)
+ cleanup()
+
+ c.RunDockerComposeCmd(t, "-f", "./fixtures/wait/compose.yaml", "--project-name", projectName, "up", "-d")
+ c.RunDockerComposeCmd(t, "--project-name", projectName, "wait", "--down-project", "faster")
+
+ res := c.RunDockerCmd(t, "ps", "--all")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+}
diff --git a/pkg/e2e/watch_test.go b/pkg/e2e/watch_test.go
new file mode 100644
index 00000000000..360fe5210e3
--- /dev/null
+++ b/pkg/e2e/watch_test.go
@@ -0,0 +1,431 @@
+/*
+ Copyright 2023 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package e2e
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/assert/cmp"
+ "gotest.tools/v3/icmd"
+ "gotest.tools/v3/poll"
+)
+
+func TestWatch(t *testing.T) {
+ services := []string{"alpine", "busybox", "debian"}
+ for _, svcName := range services {
+ t.Run(svcName, func(t *testing.T) {
+ t.Helper()
+ doTest(t, svcName)
+ })
+ }
+}
+
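+// TestRebuildOnDotEnvWithExternalNetwork starts `watch` for a service attached
+// to a pre-created external network, then updates the project's .env file and
+// checks that the service is rebuilt ("batch complete" in the watch output)
+// while keeping the external network as its network mode.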
+func TestRebuildOnDotEnvWithExternalNetwork(t *testing.T) {
+ const projectName = "test_rebuild_on_dotenv_with_external_network"
+ const svcName = "ext-alpine"
+ containerName := strings.Join([]string{projectName, svcName, "1"}, "-")
+ const networkName = "e2e-watch-external_network_test"
+ const dotEnvFilepath = "./fixtures/watch/.env"
+
+ c := NewCLI(t, WithEnv(
+ "COMPOSE_PROJECT_NAME="+projectName,
+ "COMPOSE_FILE=./fixtures/watch/with-external-network.yaml",
+ ))
+
+ cleanup := func() {
+ c.RunDockerComposeCmdNoCheck(t, "down", "--remove-orphans", "--volumes", "--rmi=local")
+ c.RunDockerOrExitError(t, "network", "rm", networkName)
+ os.Remove(dotEnvFilepath) //nolint:errcheck
+ }
+ cleanup()
+
+ t.Log("create network that is referenced by the container we're testing")
+ c.RunDockerCmd(t, "network", "create", networkName)
+ res := c.RunDockerCmd(t, "network", "ls")
+ assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())
+
+ t.Log("create a dotenv file that will be used to trigger the rebuild")
+ err := os.WriteFile(dotEnvFilepath, []byte("HELLO=WORLD"), 0o666)
+ assert.NilError(t, err)
+ _, err = os.ReadFile(dotEnvFilepath)
+ assert.NilError(t, err)
+
+	// TODO: refactor this duplicated watch-startup code into a shared test helper
+ t.Log("starting docker compose watch")
+ cmd := c.NewDockerComposeCmd(t, "--verbose", "watch", svcName)
+ // stream output since watch runs in the background
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ r := icmd.StartCmd(cmd)
+ require.NoError(t, r.Error)
+ var testComplete atomic.Bool
+ go func() {
+ // if the process exits abnormally before the test is done, fail the test
+ if err := r.Cmd.Wait(); err != nil && !t.Failed() && !testComplete.Load() {
+ assert.Check(t, cmp.Nil(err))
+ }
+ }()
+
+ t.Log("wait for watch to start watching")
+ c.WaitForCondition(t, func() (bool, string) {
+ out := r.String()
+ return strings.Contains(out, "Watch enabled"), "watch not started"
+ }, 30*time.Second, 1*time.Second)
+
+ pn := c.RunDockerCmd(t, "inspect", containerName, "-f", "{{ .HostConfig.NetworkMode }}")
+ assert.Equal(t, strings.TrimSpace(pn.Stdout()), networkName)
+
+	t.Log("update the dotenv file to trigger the rebuild")
+ err = os.WriteFile(dotEnvFilepath, []byte("HELLO=WORLD\nTEST=REBUILD"), 0o666)
+ assert.NilError(t, err)
+ _, err = os.ReadFile(dotEnvFilepath)
+ assert.NilError(t, err)
+
+ // NOTE: are there any other ways to check if the container has been rebuilt?
+	t.Log("check if the container has been rebuilt")
+ c.WaitForCondition(t, func() (bool, string) {
+ out := r.String()
+ if strings.Count(out, "batch complete") != 1 {
+ return false, fmt.Sprintf("container %s was not rebuilt", containerName)
+ }
+ return true, fmt.Sprintf("container %s was rebuilt", containerName)
+ }, 30*time.Second, 1*time.Second)
+
+ pn2 := c.RunDockerCmd(t, "inspect", containerName, "-f", "{{ .HostConfig.NetworkMode }}")
+ assert.Equal(t, strings.TrimSpace(pn2.Stdout()), networkName)
+
+ assert.Check(t, !strings.Contains(r.Combined(), "Application failed to start after update"))
+
+ t.Cleanup(cleanup)
+ t.Cleanup(func() {
+ // IMPORTANT: watch doesn't exit on its own, don't leak processes!
+ if r.Cmd.Process != nil {
+ t.Logf("Killing watch process: pid[%d]", r.Cmd.Process.Pid)
+ _ = r.Cmd.Process.Kill()
+ }
+ })
+ testComplete.Store(true)
+}
+
+// NOTE: these tests all share a single Compose fixture, but each copy runs in
+// its own temp directory with a distinct project name, so they are safe to run
+// concurrently (running them in parallel is still not recommended).
+func doTest(t *testing.T, svcName string) {
+ tmpdir := t.TempDir()
+ dataDir := filepath.Join(tmpdir, "data")
+ configDir := filepath.Join(tmpdir, "config")
+
+ writeTestFile := func(name, contents, sourceDir string) {
+ t.Helper()
+ dest := filepath.Join(sourceDir, name)
+ require.NoError(t, os.MkdirAll(filepath.Dir(dest), 0o700))
+ t.Logf("writing %q to %q", contents, dest)
+ require.NoError(t, os.WriteFile(dest, []byte(contents+"\n"), 0o600))
+ }
+ writeDataFile := func(name, contents string) {
+ writeTestFile(name, contents, dataDir)
+ }
+
+ composeFilePath := filepath.Join(tmpdir, "compose.yaml")
+ CopyFile(t, filepath.Join("fixtures", "watch", "compose.yaml"), composeFilePath)
+
+ projName := "e2e-watch-" + svcName
+ env := []string{
+ "COMPOSE_FILE=" + composeFilePath,
+ "COMPOSE_PROJECT_NAME=" + projName,
+ }
+
+ cli := NewCLI(t, WithEnv(env...))
+
+ // important that --rmi is used to prune the images and ensure that watch builds on launch
+ defer cli.cleanupWithDown(t, projName, "--rmi=local")
+
+ cmd := cli.NewDockerComposeCmd(t, "--verbose", "watch", svcName)
+ // stream output since watch runs in the background
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ r := icmd.StartCmd(cmd)
+ require.NoError(t, r.Error)
+ t.Cleanup(func() {
+ // IMPORTANT: watch doesn't exit on its own, don't leak processes!
+ if r.Cmd.Process != nil {
+ t.Logf("Killing watch process: pid[%d]", r.Cmd.Process.Pid)
+ _ = r.Cmd.Process.Kill()
+ }
+ })
+ var testComplete atomic.Bool
+ go func() {
+ // if the process exits abnormally before the test is done, fail the test
+ if err := r.Cmd.Wait(); err != nil && !t.Failed() && !testComplete.Load() {
+ assert.Check(t, cmp.Nil(err))
+ }
+ }()
+
+ require.NoError(t, os.Mkdir(dataDir, 0o700))
+
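+	// checkFileContents returns a poll.Check that execs `cat` in the service
+	// container and succeeds once the expected contents show up, failing early
+	// if the watch process has already exited.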
+ checkFileContents := func(path string, contents string) poll.Check {
+ return func(pollLog poll.LogT) poll.Result {
+ if r.Cmd.ProcessState != nil {
+ return poll.Error(fmt.Errorf("watch process exited early: %s", r.Cmd.ProcessState))
+ }
+ res := icmd.RunCmd(cli.NewDockerComposeCmd(t, "exec", svcName, "cat", path))
+ if strings.Contains(res.Stdout(), contents) {
+ return poll.Success()
+ }
+ return poll.Continue("%v", res.Combined())
+ }
+ }
+
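+	// waitForFlush writes a random sentinel value to a watched file and polls
+	// until it appears inside the container, acting as a barrier: once the
+	// sentinel is visible, earlier file events have been processed by watch.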
+ waitForFlush := func() {
+ b := make([]byte, 32)
+ _, _ = rand.Read(b)
+ sentinelVal := fmt.Sprintf("%x", b)
+ writeDataFile("wait.txt", sentinelVal)
+ poll.WaitOn(t, checkFileContents("/app/data/wait.txt", sentinelVal))
+ }
+
+ t.Logf("Writing to a file until Compose watch is up and running")
+ poll.WaitOn(t, func(t poll.LogT) poll.Result {
+ writeDataFile("hello.txt", "hello world")
+ return checkFileContents("/app/data/hello.txt", "hello world")(t)
+ }, poll.WithDelay(time.Second))
+
+ t.Logf("Modifying file contents")
+ writeDataFile("hello.txt", "hello watch")
+ poll.WaitOn(t, checkFileContents("/app/data/hello.txt", "hello watch"))
+
+ t.Logf("Deleting file")
+ require.NoError(t, os.Remove(filepath.Join(dataDir, "hello.txt")))
+ waitForFlush()
+ cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/hello.txt").
+ Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such file or directory",
+ })
+
+ t.Logf("Writing to ignored paths")
+ writeDataFile("data.foo", "ignored")
+ writeDataFile(filepath.Join("ignored", "hello.txt"), "ignored")
+ waitForFlush()
+ cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/data.foo").
+ Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such file or directory",
+ })
+ cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/ignored").
+ Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such file or directory",
+ })
+
+ t.Logf("Creating subdirectory")
+ require.NoError(t, os.Mkdir(filepath.Join(dataDir, "subdir"), 0o700))
+ waitForFlush()
+ cli.RunDockerComposeCmd(t, "exec", svcName, "stat", "/app/data/subdir")
+
+ t.Logf("Writing to file in subdirectory")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "a")
+ poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "a"))
+
+ t.Logf("Writing to file multiple times")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "x")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "y")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "z")
+ poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "z"))
+ writeDataFile(filepath.Join("subdir", "file.txt"), "z")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "y")
+ writeDataFile(filepath.Join("subdir", "file.txt"), "x")
+ poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "x"))
+
+ t.Logf("Deleting directory")
+ require.NoError(t, os.RemoveAll(filepath.Join(dataDir, "subdir")))
+ waitForFlush()
+ cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/subdir").
+ Assert(t, icmd.Expected{
+ ExitCode: 1,
+ Err: "No such file or directory",
+ })
+
+ t.Logf("Sync and restart use case")
+ require.NoError(t, os.Mkdir(configDir, 0o700))
+ writeTestFile("file.config", "This is an updated config file", configDir)
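+	// checkRestart polls the combined watch output for the given fragment; the
+	// test waits for the "restarted" log line and then for the synced config
+	// file to show up in the container.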
+ checkRestart := func(state string) poll.Check {
+ return func(pollLog poll.LogT) poll.Result {
+ if strings.Contains(r.Combined(), state) {
+ return poll.Success()
+ }
+ return poll.Continue("%v", r.Combined())
+ }
+ }
+ poll.WaitOn(t, checkRestart(fmt.Sprintf("service(s) [%q] restarted", svcName)))
+ poll.WaitOn(t, checkFileContents("/app/config/file.config", "This is an updated config file"))
+
+ testComplete.Store(true)
+}
+
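+// TestWatchExec exercises a watch rule that runs a command in the container on
+// file changes (see fixtures/watch/exec.yaml): the test waits for the service's
+// ping output, creates a new file, and expects "SUCCESS" to show up in the
+// watch output.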
+func TestWatchExec(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "test_watch_exec"
+
+ defer c.cleanupWithDown(t, projectName)
+
+ tmpdir := t.TempDir()
+ composeFilePath := filepath.Join(tmpdir, "compose.yaml")
+ CopyFile(t, filepath.Join("fixtures", "watch", "exec.yaml"), composeFilePath)
+ cmd := c.NewDockerComposeCmd(t, "-p", projectName, "-f", composeFilePath, "up", "--watch")
+ buffer := bytes.NewBuffer(nil)
+ cmd.Stdout = buffer
+ watch := icmd.StartCmd(cmd)
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ out := buffer.String()
+ if strings.Contains(out, "64 bytes from") {
+ return poll.Success()
+ }
+ return poll.Continue("%v", watch.Stdout())
+ })
+
+ t.Logf("Create new file")
+
+ testFile := filepath.Join(tmpdir, "test")
+ require.NoError(t, os.WriteFile(testFile, []byte("test\n"), 0o600))
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ out := buffer.String()
+ if strings.Contains(out, "SUCCESS") {
+ return poll.Success()
+ }
+ return poll.Continue("%v", out)
+ })
+ c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "kill", "-s", "9")
+}
+
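+// TestWatchMultiServices checks that one shared file triggers a rebuild of the
+// "a", "b" and "c" services: after each change, `cat /data/<service>` in every
+// container must eventually return the new contents.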
+func TestWatchMultiServices(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "test_watch_rebuild"
+
+ defer c.cleanupWithDown(t, projectName)
+
+ tmpdir := t.TempDir()
+ composeFilePath := filepath.Join(tmpdir, "compose.yaml")
+ CopyFile(t, filepath.Join("fixtures", "watch", "rebuild.yaml"), composeFilePath)
+
+ testFile := filepath.Join(tmpdir, "test")
+ require.NoError(t, os.WriteFile(testFile, []byte("test"), 0o600))
+
+ cmd := c.NewDockerComposeCmd(t, "-p", projectName, "-f", composeFilePath, "up", "--watch")
+ buffer := bytes.NewBuffer(nil)
+ cmd.Stdout = buffer
+ watch := icmd.StartCmd(cmd)
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ if strings.Contains(watch.Stdout(), "Attaching to ") {
+ return poll.Success()
+ }
+ return poll.Continue("%v", watch.Stdout())
+ })
+
+ waitRebuild := func(service string, expected string) {
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ cat := c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "exec", service, "cat", "/data/"+service)
+ if strings.Contains(cat.Stdout(), expected) {
+ return poll.Success()
+ }
+ return poll.Continue("%v", cat.Combined())
+ })
+ }
+ waitRebuild("a", "test")
+ waitRebuild("b", "test")
+ waitRebuild("c", "test")
+
+ require.NoError(t, os.WriteFile(testFile, []byte("updated"), 0o600))
+ waitRebuild("a", "updated")
+ waitRebuild("b", "updated")
+ waitRebuild("c", "updated")
+
+ c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "kill", "-s", "9")
+}
+
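+// TestWatchIncludes checks include filtering (see fixtures/watch/include.yaml):
+// with both A.test and B.test created next to the compose file, only A.test
+// should be synced into the container.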
+func TestWatchIncludes(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "test_watch_includes"
+
+ defer c.cleanupWithDown(t, projectName)
+
+ tmpdir := t.TempDir()
+ composeFilePath := filepath.Join(tmpdir, "compose.yaml")
+ CopyFile(t, filepath.Join("fixtures", "watch", "include.yaml"), composeFilePath)
+
+ cmd := c.NewDockerComposeCmd(t, "-p", projectName, "-f", composeFilePath, "up", "--watch")
+ buffer := bytes.NewBuffer(nil)
+ cmd.Stdout = buffer
+ watch := icmd.StartCmd(cmd)
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ if strings.Contains(watch.Stdout(), "Attaching to ") {
+ return poll.Success()
+ }
+ return poll.Continue("%v", watch.Stdout())
+ })
+
+ require.NoError(t, os.WriteFile(filepath.Join(tmpdir, "B.test"), []byte("test"), 0o600))
+ require.NoError(t, os.WriteFile(filepath.Join(tmpdir, "A.test"), []byte("test"), 0o600))
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ cat := c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "exec", "a", "ls", "/data/")
+ if strings.Contains(cat.Stdout(), "A.test") {
+ assert.Check(t, !strings.Contains(cat.Stdout(), "B.test"))
+ return poll.Success()
+ }
+ return poll.Continue("%v", cat.Combined())
+ })
+
+ c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "kill", "-s", "9")
+}
+
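+// TestCheckWarningXInitialSyn checks that the legacy x-initialSync attribute
+// (see fixtures/watch/x-initialSync.yaml) makes `up --watch` print a
+// deprecation warning pointing at the initial_sync attribute.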
+func TestCheckWarningXInitialSyn(t *testing.T) {
+ c := NewCLI(t)
+ const projectName = "test_watch_warn_initial_syn"
+
+ defer c.cleanupWithDown(t, projectName)
+
+ tmpdir := t.TempDir()
+ composeFilePath := filepath.Join(tmpdir, "compose.yaml")
+ CopyFile(t, filepath.Join("fixtures", "watch", "x-initialSync.yaml"), composeFilePath)
+ cmd := c.NewDockerComposeCmd(t, "-p", projectName, "-f", composeFilePath, "--verbose", "up", "--watch")
+ buffer := bytes.NewBuffer(nil)
+ cmd.Stdout = buffer
+ watch := icmd.StartCmd(cmd)
+
+ poll.WaitOn(t, func(l poll.LogT) poll.Result {
+ if strings.Contains(watch.Combined(), "x-initialSync is DEPRECATED, please use the official `initial_sync` attribute") {
+ return poll.Success()
+ }
+ return poll.Continue("%v", watch.Stdout())
+ })
+
+ c.RunDockerComposeCmdNoCheck(t, "-p", projectName, "kill", "-s", "9")
+}
diff --git a/pkg/mocks/mock_docker_api.go b/pkg/mocks/mock_docker_api.go
index bce79f368a3..4a6ebaaccf4 100644
--- a/pkg/mocks/mock_docker_api.go
+++ b/pkg/mocks/mock_docker_api.go
@@ -1,5 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/docker/docker/client (interfaces: APIClient)
+//
+// Generated by this command:
+//
+// mockgen -destination pkg/mocks/mock_docker_api.go -package mocks github.com/docker/docker/client APIClient
+//
// Package mocks is a generated GoMock package.
package mocks
@@ -10,9 +15,11 @@ import (
net "net"
http "net/http"
reflect "reflect"
- time "time"
types "github.com/docker/docker/api/types"
+ build "github.com/docker/docker/api/types/build"
+ checkpoint "github.com/docker/docker/api/types/checkpoint"
+ common "github.com/docker/docker/api/types/common"
container "github.com/docker/docker/api/types/container"
events "github.com/docker/docker/api/types/events"
filters "github.com/docker/docker/api/types/filters"
@@ -20,9 +27,11 @@ import (
network "github.com/docker/docker/api/types/network"
registry "github.com/docker/docker/api/types/registry"
swarm "github.com/docker/docker/api/types/swarm"
+ system "github.com/docker/docker/api/types/system"
volume "github.com/docker/docker/api/types/volume"
- gomock "github.com/golang/mock/gomock"
+ client "github.com/docker/docker/client"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ gomock "go.uber.org/mock/gomock"
)
// MockAPIClient is a mock of APIClient interface.
@@ -49,16 +58,16 @@ func (m *MockAPIClient) EXPECT() *MockAPIClientMockRecorder {
}
// BuildCachePrune mocks base method.
-func (m *MockAPIClient) BuildCachePrune(arg0 context.Context, arg1 types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
+func (m *MockAPIClient) BuildCachePrune(arg0 context.Context, arg1 build.CachePruneOptions) (*build.CachePruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildCachePrune", arg0, arg1)
- ret0, _ := ret[0].(*types.BuildCachePruneReport)
+ ret0, _ := ret[0].(*build.CachePruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BuildCachePrune indicates an expected call of BuildCachePrune.
-func (mr *MockAPIClientMockRecorder) BuildCachePrune(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) BuildCachePrune(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildCachePrune", reflect.TypeOf((*MockAPIClient)(nil).BuildCachePrune), arg0, arg1)
}
@@ -72,13 +81,13 @@ func (m *MockAPIClient) BuildCancel(arg0 context.Context, arg1 string) error {
}
// BuildCancel indicates an expected call of BuildCancel.
-func (mr *MockAPIClientMockRecorder) BuildCancel(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) BuildCancel(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildCancel", reflect.TypeOf((*MockAPIClient)(nil).BuildCancel), arg0, arg1)
}
// CheckpointCreate mocks base method.
-func (m *MockAPIClient) CheckpointCreate(arg0 context.Context, arg1 string, arg2 types.CheckpointCreateOptions) error {
+func (m *MockAPIClient) CheckpointCreate(arg0 context.Context, arg1 string, arg2 checkpoint.CreateOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckpointCreate", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -86,13 +95,13 @@ func (m *MockAPIClient) CheckpointCreate(arg0 context.Context, arg1 string, arg2
}
// CheckpointCreate indicates an expected call of CheckpointCreate.
-func (mr *MockAPIClientMockRecorder) CheckpointCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) CheckpointCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointCreate", reflect.TypeOf((*MockAPIClient)(nil).CheckpointCreate), arg0, arg1, arg2)
}
// CheckpointDelete mocks base method.
-func (m *MockAPIClient) CheckpointDelete(arg0 context.Context, arg1 string, arg2 types.CheckpointDeleteOptions) error {
+func (m *MockAPIClient) CheckpointDelete(arg0 context.Context, arg1 string, arg2 checkpoint.DeleteOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckpointDelete", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -100,22 +109,22 @@ func (m *MockAPIClient) CheckpointDelete(arg0 context.Context, arg1 string, arg2
}
// CheckpointDelete indicates an expected call of CheckpointDelete.
-func (mr *MockAPIClientMockRecorder) CheckpointDelete(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) CheckpointDelete(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointDelete", reflect.TypeOf((*MockAPIClient)(nil).CheckpointDelete), arg0, arg1, arg2)
}
// CheckpointList mocks base method.
-func (m *MockAPIClient) CheckpointList(arg0 context.Context, arg1 string, arg2 types.CheckpointListOptions) ([]types.Checkpoint, error) {
+func (m *MockAPIClient) CheckpointList(arg0 context.Context, arg1 string, arg2 checkpoint.ListOptions) ([]checkpoint.Summary, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckpointList", arg0, arg1, arg2)
- ret0, _ := ret[0].([]types.Checkpoint)
+ ret0, _ := ret[0].([]checkpoint.Summary)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CheckpointList indicates an expected call of CheckpointList.
-func (mr *MockAPIClientMockRecorder) CheckpointList(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) CheckpointList(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointList", reflect.TypeOf((*MockAPIClient)(nil).CheckpointList), arg0, arg1, arg2)
}
@@ -149,16 +158,16 @@ func (mr *MockAPIClientMockRecorder) Close() *gomock.Call {
}
// ConfigCreate mocks base method.
-func (m *MockAPIClient) ConfigCreate(arg0 context.Context, arg1 swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
+func (m *MockAPIClient) ConfigCreate(arg0 context.Context, arg1 swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigCreate", arg0, arg1)
- ret0, _ := ret[0].(types.ConfigCreateResponse)
+ ret0, _ := ret[0].(swarm.ConfigCreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ConfigCreate indicates an expected call of ConfigCreate.
-func (mr *MockAPIClientMockRecorder) ConfigCreate(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ConfigCreate(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigCreate", reflect.TypeOf((*MockAPIClient)(nil).ConfigCreate), arg0, arg1)
}
@@ -174,13 +183,13 @@ func (m *MockAPIClient) ConfigInspectWithRaw(arg0 context.Context, arg1 string)
}
// ConfigInspectWithRaw indicates an expected call of ConfigInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) ConfigInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ConfigInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).ConfigInspectWithRaw), arg0, arg1)
}
// ConfigList mocks base method.
-func (m *MockAPIClient) ConfigList(arg0 context.Context, arg1 types.ConfigListOptions) ([]swarm.Config, error) {
+func (m *MockAPIClient) ConfigList(arg0 context.Context, arg1 swarm.ConfigListOptions) ([]swarm.Config, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Config)
@@ -189,7 +198,7 @@ func (m *MockAPIClient) ConfigList(arg0 context.Context, arg1 types.ConfigListOp
}
// ConfigList indicates an expected call of ConfigList.
-func (mr *MockAPIClientMockRecorder) ConfigList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ConfigList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigList", reflect.TypeOf((*MockAPIClient)(nil).ConfigList), arg0, arg1)
}
@@ -203,7 +212,7 @@ func (m *MockAPIClient) ConfigRemove(arg0 context.Context, arg1 string) error {
}
// ConfigRemove indicates an expected call of ConfigRemove.
-func (mr *MockAPIClientMockRecorder) ConfigRemove(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ConfigRemove(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigRemove", reflect.TypeOf((*MockAPIClient)(nil).ConfigRemove), arg0, arg1)
}
@@ -217,13 +226,13 @@ func (m *MockAPIClient) ConfigUpdate(arg0 context.Context, arg1 string, arg2 swa
}
// ConfigUpdate indicates an expected call of ConfigUpdate.
-func (mr *MockAPIClientMockRecorder) ConfigUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ConfigUpdate(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigUpdate", reflect.TypeOf((*MockAPIClient)(nil).ConfigUpdate), arg0, arg1, arg2, arg3)
}
// ContainerAttach mocks base method.
-func (m *MockAPIClient) ContainerAttach(arg0 context.Context, arg1 string, arg2 types.ContainerAttachOptions) (types.HijackedResponse, error) {
+func (m *MockAPIClient) ContainerAttach(arg0 context.Context, arg1 string, arg2 container.AttachOptions) (types.HijackedResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerAttach", arg0, arg1, arg2)
ret0, _ := ret[0].(types.HijackedResponse)
@@ -232,58 +241,58 @@ func (m *MockAPIClient) ContainerAttach(arg0 context.Context, arg1 string, arg2
}
// ContainerAttach indicates an expected call of ContainerAttach.
-func (mr *MockAPIClientMockRecorder) ContainerAttach(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerAttach(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerAttach", reflect.TypeOf((*MockAPIClient)(nil).ContainerAttach), arg0, arg1, arg2)
}
// ContainerCommit mocks base method.
-func (m *MockAPIClient) ContainerCommit(arg0 context.Context, arg1 string, arg2 types.ContainerCommitOptions) (types.IDResponse, error) {
+func (m *MockAPIClient) ContainerCommit(arg0 context.Context, arg1 string, arg2 container.CommitOptions) (common.IDResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerCommit", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.IDResponse)
+ ret0, _ := ret[0].(common.IDResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerCommit indicates an expected call of ContainerCommit.
-func (mr *MockAPIClientMockRecorder) ContainerCommit(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerCommit(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerCommit", reflect.TypeOf((*MockAPIClient)(nil).ContainerCommit), arg0, arg1, arg2)
}
// ContainerCreate mocks base method.
-func (m *MockAPIClient) ContainerCreate(arg0 context.Context, arg1 *container.Config, arg2 *container.HostConfig, arg3 *network.NetworkingConfig, arg4 *v1.Platform, arg5 string) (container.ContainerCreateCreatedBody, error) {
+func (m *MockAPIClient) ContainerCreate(arg0 context.Context, arg1 *container.Config, arg2 *container.HostConfig, arg3 *network.NetworkingConfig, arg4 *v1.Platform, arg5 string) (container.CreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerCreate", arg0, arg1, arg2, arg3, arg4, arg5)
- ret0, _ := ret[0].(container.ContainerCreateCreatedBody)
+ ret0, _ := ret[0].(container.CreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerCreate indicates an expected call of ContainerCreate.
-func (mr *MockAPIClientMockRecorder) ContainerCreate(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerCreate(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerCreate", reflect.TypeOf((*MockAPIClient)(nil).ContainerCreate), arg0, arg1, arg2, arg3, arg4, arg5)
}
// ContainerDiff mocks base method.
-func (m *MockAPIClient) ContainerDiff(arg0 context.Context, arg1 string) ([]container.ContainerChangeResponseItem, error) {
+func (m *MockAPIClient) ContainerDiff(arg0 context.Context, arg1 string) ([]container.FilesystemChange, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerDiff", arg0, arg1)
- ret0, _ := ret[0].([]container.ContainerChangeResponseItem)
+ ret0, _ := ret[0].([]container.FilesystemChange)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerDiff indicates an expected call of ContainerDiff.
-func (mr *MockAPIClientMockRecorder) ContainerDiff(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerDiff(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerDiff", reflect.TypeOf((*MockAPIClient)(nil).ContainerDiff), arg0, arg1)
}
// ContainerExecAttach mocks base method.
-func (m *MockAPIClient) ContainerExecAttach(arg0 context.Context, arg1 string, arg2 types.ExecStartCheck) (types.HijackedResponse, error) {
+func (m *MockAPIClient) ContainerExecAttach(arg0 context.Context, arg1 string, arg2 container.ExecStartOptions) (types.HijackedResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerExecAttach", arg0, arg1, arg2)
ret0, _ := ret[0].(types.HijackedResponse)
@@ -292,43 +301,43 @@ func (m *MockAPIClient) ContainerExecAttach(arg0 context.Context, arg1 string, a
}
// ContainerExecAttach indicates an expected call of ContainerExecAttach.
-func (mr *MockAPIClientMockRecorder) ContainerExecAttach(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExecAttach(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecAttach", reflect.TypeOf((*MockAPIClient)(nil).ContainerExecAttach), arg0, arg1, arg2)
}
// ContainerExecCreate mocks base method.
-func (m *MockAPIClient) ContainerExecCreate(arg0 context.Context, arg1 string, arg2 types.ExecConfig) (types.IDResponse, error) {
+func (m *MockAPIClient) ContainerExecCreate(arg0 context.Context, arg1 string, arg2 container.ExecOptions) (common.IDResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerExecCreate", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.IDResponse)
+ ret0, _ := ret[0].(common.IDResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerExecCreate indicates an expected call of ContainerExecCreate.
-func (mr *MockAPIClientMockRecorder) ContainerExecCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExecCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecCreate", reflect.TypeOf((*MockAPIClient)(nil).ContainerExecCreate), arg0, arg1, arg2)
}
// ContainerExecInspect mocks base method.
-func (m *MockAPIClient) ContainerExecInspect(arg0 context.Context, arg1 string) (types.ContainerExecInspect, error) {
+func (m *MockAPIClient) ContainerExecInspect(arg0 context.Context, arg1 string) (container.ExecInspect, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerExecInspect", arg0, arg1)
- ret0, _ := ret[0].(types.ContainerExecInspect)
+ ret0, _ := ret[0].(container.ExecInspect)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerExecInspect indicates an expected call of ContainerExecInspect.
-func (mr *MockAPIClientMockRecorder) ContainerExecInspect(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExecInspect(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecInspect", reflect.TypeOf((*MockAPIClient)(nil).ContainerExecInspect), arg0, arg1)
}
// ContainerExecResize mocks base method.
-func (m *MockAPIClient) ContainerExecResize(arg0 context.Context, arg1 string, arg2 types.ResizeOptions) error {
+func (m *MockAPIClient) ContainerExecResize(arg0 context.Context, arg1 string, arg2 container.ResizeOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerExecResize", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -336,13 +345,13 @@ func (m *MockAPIClient) ContainerExecResize(arg0 context.Context, arg1 string, a
}
// ContainerExecResize indicates an expected call of ContainerExecResize.
-func (mr *MockAPIClientMockRecorder) ContainerExecResize(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExecResize(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecResize", reflect.TypeOf((*MockAPIClient)(nil).ContainerExecResize), arg0, arg1, arg2)
}
// ContainerExecStart mocks base method.
-func (m *MockAPIClient) ContainerExecStart(arg0 context.Context, arg1 string, arg2 types.ExecStartCheck) error {
+func (m *MockAPIClient) ContainerExecStart(arg0 context.Context, arg1 string, arg2 container.ExecStartOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerExecStart", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -350,7 +359,7 @@ func (m *MockAPIClient) ContainerExecStart(arg0 context.Context, arg1 string, ar
}
// ContainerExecStart indicates an expected call of ContainerExecStart.
-func (mr *MockAPIClientMockRecorder) ContainerExecStart(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExecStart(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecStart", reflect.TypeOf((*MockAPIClient)(nil).ContainerExecStart), arg0, arg1, arg2)
}
@@ -365,38 +374,38 @@ func (m *MockAPIClient) ContainerExport(arg0 context.Context, arg1 string) (io.R
}
// ContainerExport indicates an expected call of ContainerExport.
-func (mr *MockAPIClientMockRecorder) ContainerExport(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerExport(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExport", reflect.TypeOf((*MockAPIClient)(nil).ContainerExport), arg0, arg1)
}
// ContainerInspect mocks base method.
-func (m *MockAPIClient) ContainerInspect(arg0 context.Context, arg1 string) (types.ContainerJSON, error) {
+func (m *MockAPIClient) ContainerInspect(arg0 context.Context, arg1 string) (container.InspectResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerInspect", arg0, arg1)
- ret0, _ := ret[0].(types.ContainerJSON)
+ ret0, _ := ret[0].(container.InspectResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerInspect indicates an expected call of ContainerInspect.
-func (mr *MockAPIClientMockRecorder) ContainerInspect(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerInspect(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerInspect", reflect.TypeOf((*MockAPIClient)(nil).ContainerInspect), arg0, arg1)
}
// ContainerInspectWithRaw mocks base method.
-func (m *MockAPIClient) ContainerInspectWithRaw(arg0 context.Context, arg1 string, arg2 bool) (types.ContainerJSON, []byte, error) {
+func (m *MockAPIClient) ContainerInspectWithRaw(arg0 context.Context, arg1 string, arg2 bool) (container.InspectResponse, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerInspectWithRaw", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ContainerJSON)
+ ret0, _ := ret[0].(container.InspectResponse)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// ContainerInspectWithRaw indicates an expected call of ContainerInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) ContainerInspectWithRaw(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerInspectWithRaw(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).ContainerInspectWithRaw), arg0, arg1, arg2)
}
@@ -410,28 +419,28 @@ func (m *MockAPIClient) ContainerKill(arg0 context.Context, arg1, arg2 string) e
}
// ContainerKill indicates an expected call of ContainerKill.
-func (mr *MockAPIClientMockRecorder) ContainerKill(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerKill(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerKill", reflect.TypeOf((*MockAPIClient)(nil).ContainerKill), arg0, arg1, arg2)
}
// ContainerList mocks base method.
-func (m *MockAPIClient) ContainerList(arg0 context.Context, arg1 types.ContainerListOptions) ([]types.Container, error) {
+func (m *MockAPIClient) ContainerList(arg0 context.Context, arg1 container.ListOptions) ([]container.Summary, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerList", arg0, arg1)
- ret0, _ := ret[0].([]types.Container)
+ ret0, _ := ret[0].([]container.Summary)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerList indicates an expected call of ContainerList.
-func (mr *MockAPIClientMockRecorder) ContainerList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerList", reflect.TypeOf((*MockAPIClient)(nil).ContainerList), arg0, arg1)
}
// ContainerLogs mocks base method.
-func (m *MockAPIClient) ContainerLogs(arg0 context.Context, arg1 string, arg2 types.ContainerLogsOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ContainerLogs(arg0 context.Context, arg1 string, arg2 container.LogsOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerLogs", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -440,7 +449,7 @@ func (m *MockAPIClient) ContainerLogs(arg0 context.Context, arg1 string, arg2 ty
}
// ContainerLogs indicates an expected call of ContainerLogs.
-func (mr *MockAPIClientMockRecorder) ContainerLogs(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerLogs(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerLogs", reflect.TypeOf((*MockAPIClient)(nil).ContainerLogs), arg0, arg1, arg2)
}
@@ -454,13 +463,13 @@ func (m *MockAPIClient) ContainerPause(arg0 context.Context, arg1 string) error
}
// ContainerPause indicates an expected call of ContainerPause.
-func (mr *MockAPIClientMockRecorder) ContainerPause(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerPause(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerPause", reflect.TypeOf((*MockAPIClient)(nil).ContainerPause), arg0, arg1)
}
// ContainerRemove mocks base method.
-func (m *MockAPIClient) ContainerRemove(arg0 context.Context, arg1 string, arg2 types.ContainerRemoveOptions) error {
+func (m *MockAPIClient) ContainerRemove(arg0 context.Context, arg1 string, arg2 container.RemoveOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerRemove", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -468,7 +477,7 @@ func (m *MockAPIClient) ContainerRemove(arg0 context.Context, arg1 string, arg2
}
// ContainerRemove indicates an expected call of ContainerRemove.
-func (mr *MockAPIClientMockRecorder) ContainerRemove(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerRemove(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRemove", reflect.TypeOf((*MockAPIClient)(nil).ContainerRemove), arg0, arg1, arg2)
}
@@ -482,13 +491,13 @@ func (m *MockAPIClient) ContainerRename(arg0 context.Context, arg1, arg2 string)
}
// ContainerRename indicates an expected call of ContainerRename.
-func (mr *MockAPIClientMockRecorder) ContainerRename(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerRename(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRename", reflect.TypeOf((*MockAPIClient)(nil).ContainerRename), arg0, arg1, arg2)
}
// ContainerResize mocks base method.
-func (m *MockAPIClient) ContainerResize(arg0 context.Context, arg1 string, arg2 types.ResizeOptions) error {
+func (m *MockAPIClient) ContainerResize(arg0 context.Context, arg1 string, arg2 container.ResizeOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerResize", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -496,13 +505,13 @@ func (m *MockAPIClient) ContainerResize(arg0 context.Context, arg1 string, arg2
}
// ContainerResize indicates an expected call of ContainerResize.
-func (mr *MockAPIClientMockRecorder) ContainerResize(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerResize(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerResize", reflect.TypeOf((*MockAPIClient)(nil).ContainerResize), arg0, arg1, arg2)
}
// ContainerRestart mocks base method.
-func (m *MockAPIClient) ContainerRestart(arg0 context.Context, arg1 string, arg2 *time.Duration) error {
+func (m *MockAPIClient) ContainerRestart(arg0 context.Context, arg1 string, arg2 container.StopOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerRestart", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -510,13 +519,13 @@ func (m *MockAPIClient) ContainerRestart(arg0 context.Context, arg1 string, arg2
}
// ContainerRestart indicates an expected call of ContainerRestart.
-func (mr *MockAPIClientMockRecorder) ContainerRestart(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerRestart(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRestart", reflect.TypeOf((*MockAPIClient)(nil).ContainerRestart), arg0, arg1, arg2)
}
// ContainerStart mocks base method.
-func (m *MockAPIClient) ContainerStart(arg0 context.Context, arg1 string, arg2 types.ContainerStartOptions) error {
+func (m *MockAPIClient) ContainerStart(arg0 context.Context, arg1 string, arg2 container.StartOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerStart", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -524,58 +533,58 @@ func (m *MockAPIClient) ContainerStart(arg0 context.Context, arg1 string, arg2 t
}
// ContainerStart indicates an expected call of ContainerStart.
-func (mr *MockAPIClientMockRecorder) ContainerStart(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerStart(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStart", reflect.TypeOf((*MockAPIClient)(nil).ContainerStart), arg0, arg1, arg2)
}
// ContainerStatPath mocks base method.
-func (m *MockAPIClient) ContainerStatPath(arg0 context.Context, arg1, arg2 string) (types.ContainerPathStat, error) {
+func (m *MockAPIClient) ContainerStatPath(arg0 context.Context, arg1, arg2 string) (container.PathStat, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerStatPath", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ContainerPathStat)
+ ret0, _ := ret[0].(container.PathStat)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerStatPath indicates an expected call of ContainerStatPath.
-func (mr *MockAPIClientMockRecorder) ContainerStatPath(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerStatPath(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStatPath", reflect.TypeOf((*MockAPIClient)(nil).ContainerStatPath), arg0, arg1, arg2)
}
// ContainerStats mocks base method.
-func (m *MockAPIClient) ContainerStats(arg0 context.Context, arg1 string, arg2 bool) (types.ContainerStats, error) {
+func (m *MockAPIClient) ContainerStats(arg0 context.Context, arg1 string, arg2 bool) (container.StatsResponseReader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerStats", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ContainerStats)
+ ret0, _ := ret[0].(container.StatsResponseReader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerStats indicates an expected call of ContainerStats.
-func (mr *MockAPIClientMockRecorder) ContainerStats(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerStats(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStats", reflect.TypeOf((*MockAPIClient)(nil).ContainerStats), arg0, arg1, arg2)
}
// ContainerStatsOneShot mocks base method.
-func (m *MockAPIClient) ContainerStatsOneShot(arg0 context.Context, arg1 string) (types.ContainerStats, error) {
+func (m *MockAPIClient) ContainerStatsOneShot(arg0 context.Context, arg1 string) (container.StatsResponseReader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerStatsOneShot", arg0, arg1)
- ret0, _ := ret[0].(types.ContainerStats)
+ ret0, _ := ret[0].(container.StatsResponseReader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerStatsOneShot indicates an expected call of ContainerStatsOneShot.
-func (mr *MockAPIClientMockRecorder) ContainerStatsOneShot(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerStatsOneShot(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStatsOneShot", reflect.TypeOf((*MockAPIClient)(nil).ContainerStatsOneShot), arg0, arg1)
}
// ContainerStop mocks base method.
-func (m *MockAPIClient) ContainerStop(arg0 context.Context, arg1 string, arg2 *time.Duration) error {
+func (m *MockAPIClient) ContainerStop(arg0 context.Context, arg1 string, arg2 container.StopOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerStop", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -583,22 +592,22 @@ func (m *MockAPIClient) ContainerStop(arg0 context.Context, arg1 string, arg2 *t
}
// ContainerStop indicates an expected call of ContainerStop.
-func (mr *MockAPIClientMockRecorder) ContainerStop(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerStop(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStop", reflect.TypeOf((*MockAPIClient)(nil).ContainerStop), arg0, arg1, arg2)
}
// ContainerTop mocks base method.
-func (m *MockAPIClient) ContainerTop(arg0 context.Context, arg1 string, arg2 []string) (container.ContainerTopOKBody, error) {
+func (m *MockAPIClient) ContainerTop(arg0 context.Context, arg1 string, arg2 []string) (container.TopResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerTop", arg0, arg1, arg2)
- ret0, _ := ret[0].(container.ContainerTopOKBody)
+ ret0, _ := ret[0].(container.TopResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerTop indicates an expected call of ContainerTop.
-func (mr *MockAPIClientMockRecorder) ContainerTop(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerTop(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerTop", reflect.TypeOf((*MockAPIClient)(nil).ContainerTop), arg0, arg1, arg2)
}
@@ -612,74 +621,74 @@ func (m *MockAPIClient) ContainerUnpause(arg0 context.Context, arg1 string) erro
}
// ContainerUnpause indicates an expected call of ContainerUnpause.
-func (mr *MockAPIClientMockRecorder) ContainerUnpause(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerUnpause(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerUnpause", reflect.TypeOf((*MockAPIClient)(nil).ContainerUnpause), arg0, arg1)
}
// ContainerUpdate mocks base method.
-func (m *MockAPIClient) ContainerUpdate(arg0 context.Context, arg1 string, arg2 container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
+func (m *MockAPIClient) ContainerUpdate(arg0 context.Context, arg1 string, arg2 container.UpdateConfig) (container.UpdateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerUpdate", arg0, arg1, arg2)
- ret0, _ := ret[0].(container.ContainerUpdateOKBody)
+ ret0, _ := ret[0].(container.UpdateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainerUpdate indicates an expected call of ContainerUpdate.
-func (mr *MockAPIClientMockRecorder) ContainerUpdate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerUpdate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerUpdate", reflect.TypeOf((*MockAPIClient)(nil).ContainerUpdate), arg0, arg1, arg2)
}
// ContainerWait mocks base method.
-func (m *MockAPIClient) ContainerWait(arg0 context.Context, arg1 string, arg2 container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
+func (m *MockAPIClient) ContainerWait(arg0 context.Context, arg1 string, arg2 container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainerWait", arg0, arg1, arg2)
- ret0, _ := ret[0].(<-chan container.ContainerWaitOKBody)
+ ret0, _ := ret[0].(<-chan container.WaitResponse)
ret1, _ := ret[1].(<-chan error)
return ret0, ret1
}
// ContainerWait indicates an expected call of ContainerWait.
-func (mr *MockAPIClientMockRecorder) ContainerWait(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainerWait(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerWait", reflect.TypeOf((*MockAPIClient)(nil).ContainerWait), arg0, arg1, arg2)
}
// ContainersPrune mocks base method.
-func (m *MockAPIClient) ContainersPrune(arg0 context.Context, arg1 filters.Args) (types.ContainersPruneReport, error) {
+func (m *MockAPIClient) ContainersPrune(arg0 context.Context, arg1 filters.Args) (container.PruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ContainersPrune", arg0, arg1)
- ret0, _ := ret[0].(types.ContainersPruneReport)
+ ret0, _ := ret[0].(container.PruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ContainersPrune indicates an expected call of ContainersPrune.
-func (mr *MockAPIClientMockRecorder) ContainersPrune(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ContainersPrune(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainersPrune", reflect.TypeOf((*MockAPIClient)(nil).ContainersPrune), arg0, arg1)
}
// CopyFromContainer mocks base method.
-func (m *MockAPIClient) CopyFromContainer(arg0 context.Context, arg1, arg2 string) (io.ReadCloser, types.ContainerPathStat, error) {
+func (m *MockAPIClient) CopyFromContainer(arg0 context.Context, arg1, arg2 string) (io.ReadCloser, container.PathStat, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CopyFromContainer", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
- ret1, _ := ret[1].(types.ContainerPathStat)
+ ret1, _ := ret[1].(container.PathStat)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// CopyFromContainer indicates an expected call of CopyFromContainer.
-func (mr *MockAPIClientMockRecorder) CopyFromContainer(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) CopyFromContainer(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyFromContainer", reflect.TypeOf((*MockAPIClient)(nil).CopyFromContainer), arg0, arg1, arg2)
}
// CopyToContainer mocks base method.
-func (m *MockAPIClient) CopyToContainer(arg0 context.Context, arg1, arg2 string, arg3 io.Reader, arg4 types.CopyToContainerOptions) error {
+func (m *MockAPIClient) CopyToContainer(arg0 context.Context, arg1, arg2 string, arg3 io.Reader, arg4 container.CopyToContainerOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CopyToContainer", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
@@ -687,7 +696,7 @@ func (m *MockAPIClient) CopyToContainer(arg0 context.Context, arg1, arg2 string,
}
// CopyToContainer indicates an expected call of CopyToContainer.
-func (mr *MockAPIClientMockRecorder) CopyToContainer(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) CopyToContainer(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyToContainer", reflect.TypeOf((*MockAPIClient)(nil).CopyToContainer), arg0, arg1, arg2, arg3, arg4)
}
@@ -716,7 +725,7 @@ func (m *MockAPIClient) DialHijack(arg0 context.Context, arg1, arg2 string, arg3
}
// DialHijack indicates an expected call of DialHijack.
-func (mr *MockAPIClientMockRecorder) DialHijack(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) DialHijack(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialHijack", reflect.TypeOf((*MockAPIClient)(nil).DialHijack), arg0, arg1, arg2, arg3)
}
@@ -745,7 +754,7 @@ func (m *MockAPIClient) DiskUsage(arg0 context.Context, arg1 types.DiskUsageOpti
}
// DiskUsage indicates an expected call of DiskUsage.
-func (mr *MockAPIClientMockRecorder) DiskUsage(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) DiskUsage(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiskUsage", reflect.TypeOf((*MockAPIClient)(nil).DiskUsage), arg0, arg1)
}
@@ -760,13 +769,13 @@ func (m *MockAPIClient) DistributionInspect(arg0 context.Context, arg1, arg2 str
}
// DistributionInspect indicates an expected call of DistributionInspect.
-func (mr *MockAPIClientMockRecorder) DistributionInspect(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) DistributionInspect(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DistributionInspect", reflect.TypeOf((*MockAPIClient)(nil).DistributionInspect), arg0, arg1, arg2)
}
// Events mocks base method.
-func (m *MockAPIClient) Events(arg0 context.Context, arg1 types.EventsOptions) (<-chan events.Message, <-chan error) {
+func (m *MockAPIClient) Events(arg0 context.Context, arg1 events.ListOptions) (<-chan events.Message, <-chan error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Events", arg0, arg1)
ret0, _ := ret[0].(<-chan events.Message)
@@ -775,7 +784,7 @@ func (m *MockAPIClient) Events(arg0 context.Context, arg1 types.EventsOptions) (
}
// Events indicates an expected call of Events.
-func (mr *MockAPIClientMockRecorder) Events(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) Events(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Events", reflect.TypeOf((*MockAPIClient)(nil).Events), arg0, arg1)
}
@@ -795,22 +804,22 @@ func (mr *MockAPIClientMockRecorder) HTTPClient() *gomock.Call {
}
// ImageBuild mocks base method.
-func (m *MockAPIClient) ImageBuild(arg0 context.Context, arg1 io.Reader, arg2 types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+func (m *MockAPIClient) ImageBuild(arg0 context.Context, arg1 io.Reader, arg2 build.ImageBuildOptions) (build.ImageBuildResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageBuild", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ImageBuildResponse)
+ ret0, _ := ret[0].(build.ImageBuildResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageBuild indicates an expected call of ImageBuild.
-func (mr *MockAPIClientMockRecorder) ImageBuild(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageBuild(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageBuild", reflect.TypeOf((*MockAPIClient)(nil).ImageBuild), arg0, arg1, arg2)
}
// ImageCreate mocks base method.
-func (m *MockAPIClient) ImageCreate(arg0 context.Context, arg1 string, arg2 types.ImageCreateOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ImageCreate(arg0 context.Context, arg1 string, arg2 image.CreateOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageCreate", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -819,28 +828,33 @@ func (m *MockAPIClient) ImageCreate(arg0 context.Context, arg1 string, arg2 type
}
// ImageCreate indicates an expected call of ImageCreate.
-func (mr *MockAPIClientMockRecorder) ImageCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageCreate", reflect.TypeOf((*MockAPIClient)(nil).ImageCreate), arg0, arg1, arg2)
}
// ImageHistory mocks base method.
-func (m *MockAPIClient) ImageHistory(arg0 context.Context, arg1 string) ([]image.HistoryResponseItem, error) {
+func (m *MockAPIClient) ImageHistory(arg0 context.Context, arg1 string, arg2 ...client.ImageHistoryOption) ([]image.HistoryResponseItem, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ImageHistory", arg0, arg1)
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ImageHistory", varargs...)
ret0, _ := ret[0].([]image.HistoryResponseItem)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageHistory indicates an expected call of ImageHistory.
-func (mr *MockAPIClientMockRecorder) ImageHistory(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageHistory(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageHistory", reflect.TypeOf((*MockAPIClient)(nil).ImageHistory), arg0, arg1)
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageHistory", reflect.TypeOf((*MockAPIClient)(nil).ImageHistory), varargs...)
}
// ImageImport mocks base method.
-func (m *MockAPIClient) ImageImport(arg0 context.Context, arg1 types.ImageImportSource, arg2 string, arg3 types.ImageImportOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ImageImport(arg0 context.Context, arg1 image.ImportSource, arg2 string, arg3 image.ImportOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageImport", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(io.ReadCloser)
@@ -849,59 +863,84 @@ func (m *MockAPIClient) ImageImport(arg0 context.Context, arg1 types.ImageImport
}
// ImageImport indicates an expected call of ImageImport.
-func (mr *MockAPIClientMockRecorder) ImageImport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageImport(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageImport", reflect.TypeOf((*MockAPIClient)(nil).ImageImport), arg0, arg1, arg2, arg3)
}
+// ImageInspect mocks base method.
+func (m *MockAPIClient) ImageInspect(arg0 context.Context, arg1 string, arg2 ...client.ImageInspectOption) (image.InspectResponse, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ImageInspect", varargs...)
+ ret0, _ := ret[0].(image.InspectResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ImageInspect indicates an expected call of ImageInspect.
+func (mr *MockAPIClientMockRecorder) ImageInspect(arg0, arg1 any, arg2 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageInspect", reflect.TypeOf((*MockAPIClient)(nil).ImageInspect), varargs...)
+}
+
// ImageInspectWithRaw mocks base method.
-func (m *MockAPIClient) ImageInspectWithRaw(arg0 context.Context, arg1 string) (types.ImageInspect, []byte, error) {
+func (m *MockAPIClient) ImageInspectWithRaw(arg0 context.Context, arg1 string) (image.InspectResponse, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageInspectWithRaw", arg0, arg1)
- ret0, _ := ret[0].(types.ImageInspect)
+ ret0, _ := ret[0].(image.InspectResponse)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// ImageInspectWithRaw indicates an expected call of ImageInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) ImageInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).ImageInspectWithRaw), arg0, arg1)
}
// ImageList mocks base method.
-func (m *MockAPIClient) ImageList(arg0 context.Context, arg1 types.ImageListOptions) ([]types.ImageSummary, error) {
+func (m *MockAPIClient) ImageList(arg0 context.Context, arg1 image.ListOptions) ([]image.Summary, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageList", arg0, arg1)
- ret0, _ := ret[0].([]types.ImageSummary)
+ ret0, _ := ret[0].([]image.Summary)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageList indicates an expected call of ImageList.
-func (mr *MockAPIClientMockRecorder) ImageList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageList", reflect.TypeOf((*MockAPIClient)(nil).ImageList), arg0, arg1)
}
// ImageLoad mocks base method.
-func (m *MockAPIClient) ImageLoad(arg0 context.Context, arg1 io.Reader, arg2 bool) (types.ImageLoadResponse, error) {
+func (m *MockAPIClient) ImageLoad(arg0 context.Context, arg1 io.Reader, arg2 ...client.ImageLoadOption) (image.LoadResponse, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ImageLoad", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ImageLoadResponse)
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ImageLoad", varargs...)
+ ret0, _ := ret[0].(image.LoadResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageLoad indicates an expected call of ImageLoad.
-func (mr *MockAPIClientMockRecorder) ImageLoad(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageLoad(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageLoad", reflect.TypeOf((*MockAPIClient)(nil).ImageLoad), arg0, arg1, arg2)
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageLoad", reflect.TypeOf((*MockAPIClient)(nil).ImageLoad), varargs...)
}
// ImagePull mocks base method.
-func (m *MockAPIClient) ImagePull(arg0 context.Context, arg1 string, arg2 types.ImagePullOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ImagePull(arg0 context.Context, arg1 string, arg2 image.PullOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImagePull", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -910,13 +949,13 @@ func (m *MockAPIClient) ImagePull(arg0 context.Context, arg1 string, arg2 types.
}
// ImagePull indicates an expected call of ImagePull.
-func (mr *MockAPIClientMockRecorder) ImagePull(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImagePull(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagePull", reflect.TypeOf((*MockAPIClient)(nil).ImagePull), arg0, arg1, arg2)
}
// ImagePush mocks base method.
-func (m *MockAPIClient) ImagePush(arg0 context.Context, arg1 string, arg2 types.ImagePushOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ImagePush(arg0 context.Context, arg1 string, arg2 image.PushOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImagePush", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -925,43 +964,48 @@ func (m *MockAPIClient) ImagePush(arg0 context.Context, arg1 string, arg2 types.
}
// ImagePush indicates an expected call of ImagePush.
-func (mr *MockAPIClientMockRecorder) ImagePush(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImagePush(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagePush", reflect.TypeOf((*MockAPIClient)(nil).ImagePush), arg0, arg1, arg2)
}
// ImageRemove mocks base method.
-func (m *MockAPIClient) ImageRemove(arg0 context.Context, arg1 string, arg2 types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
+func (m *MockAPIClient) ImageRemove(arg0 context.Context, arg1 string, arg2 image.RemoveOptions) ([]image.DeleteResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageRemove", arg0, arg1, arg2)
- ret0, _ := ret[0].([]types.ImageDeleteResponseItem)
+ ret0, _ := ret[0].([]image.DeleteResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageRemove indicates an expected call of ImageRemove.
-func (mr *MockAPIClientMockRecorder) ImageRemove(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageRemove(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRemove", reflect.TypeOf((*MockAPIClient)(nil).ImageRemove), arg0, arg1, arg2)
}
// ImageSave mocks base method.
-func (m *MockAPIClient) ImageSave(arg0 context.Context, arg1 []string) (io.ReadCloser, error) {
+func (m *MockAPIClient) ImageSave(arg0 context.Context, arg1 []string, arg2 ...client.ImageSaveOption) (io.ReadCloser, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ImageSave", arg0, arg1)
+ varargs := []any{arg0, arg1}
+ for _, a := range arg2 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ImageSave", varargs...)
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImageSave indicates an expected call of ImageSave.
-func (mr *MockAPIClientMockRecorder) ImageSave(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageSave(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageSave", reflect.TypeOf((*MockAPIClient)(nil).ImageSave), arg0, arg1)
+ varargs := append([]any{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageSave", reflect.TypeOf((*MockAPIClient)(nil).ImageSave), varargs...)
}
// ImageSearch mocks base method.
-func (m *MockAPIClient) ImageSearch(arg0 context.Context, arg1 string, arg2 types.ImageSearchOptions) ([]registry.SearchResult, error) {
+func (m *MockAPIClient) ImageSearch(arg0 context.Context, arg1 string, arg2 registry.SearchOptions) ([]registry.SearchResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImageSearch", arg0, arg1, arg2)
ret0, _ := ret[0].([]registry.SearchResult)
@@ -970,7 +1014,7 @@ func (m *MockAPIClient) ImageSearch(arg0 context.Context, arg1 string, arg2 type
}
// ImageSearch indicates an expected call of ImageSearch.
-func (mr *MockAPIClientMockRecorder) ImageSearch(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageSearch(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageSearch", reflect.TypeOf((*MockAPIClient)(nil).ImageSearch), arg0, arg1, arg2)
}
@@ -984,37 +1028,37 @@ func (m *MockAPIClient) ImageTag(arg0 context.Context, arg1, arg2 string) error
}
// ImageTag indicates an expected call of ImageTag.
-func (mr *MockAPIClientMockRecorder) ImageTag(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImageTag(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageTag", reflect.TypeOf((*MockAPIClient)(nil).ImageTag), arg0, arg1, arg2)
}
// ImagesPrune mocks base method.
-func (m *MockAPIClient) ImagesPrune(arg0 context.Context, arg1 filters.Args) (types.ImagesPruneReport, error) {
+func (m *MockAPIClient) ImagesPrune(arg0 context.Context, arg1 filters.Args) (image.PruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImagesPrune", arg0, arg1)
- ret0, _ := ret[0].(types.ImagesPruneReport)
+ ret0, _ := ret[0].(image.PruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImagesPrune indicates an expected call of ImagesPrune.
-func (mr *MockAPIClientMockRecorder) ImagesPrune(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ImagesPrune(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagesPrune", reflect.TypeOf((*MockAPIClient)(nil).ImagesPrune), arg0, arg1)
}
// Info mocks base method.
-func (m *MockAPIClient) Info(arg0 context.Context) (types.Info, error) {
+func (m *MockAPIClient) Info(arg0 context.Context) (system.Info, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Info", arg0)
- ret0, _ := ret[0].(types.Info)
+ ret0, _ := ret[0].(system.Info)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Info indicates an expected call of Info.
-func (mr *MockAPIClientMockRecorder) Info(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) Info(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockAPIClient)(nil).Info), arg0)
}
@@ -1026,7 +1070,7 @@ func (m *MockAPIClient) NegotiateAPIVersion(arg0 context.Context) {
}
// NegotiateAPIVersion indicates an expected call of NegotiateAPIVersion.
-func (mr *MockAPIClientMockRecorder) NegotiateAPIVersion(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NegotiateAPIVersion(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NegotiateAPIVersion", reflect.TypeOf((*MockAPIClient)(nil).NegotiateAPIVersion), arg0)
}
@@ -1038,7 +1082,7 @@ func (m *MockAPIClient) NegotiateAPIVersionPing(arg0 types.Ping) {
}
// NegotiateAPIVersionPing indicates an expected call of NegotiateAPIVersionPing.
-func (mr *MockAPIClientMockRecorder) NegotiateAPIVersionPing(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NegotiateAPIVersionPing(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NegotiateAPIVersionPing", reflect.TypeOf((*MockAPIClient)(nil).NegotiateAPIVersionPing), arg0)
}
@@ -1052,22 +1096,22 @@ func (m *MockAPIClient) NetworkConnect(arg0 context.Context, arg1, arg2 string,
}
// NetworkConnect indicates an expected call of NetworkConnect.
-func (mr *MockAPIClientMockRecorder) NetworkConnect(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkConnect(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkConnect", reflect.TypeOf((*MockAPIClient)(nil).NetworkConnect), arg0, arg1, arg2, arg3)
}
// NetworkCreate mocks base method.
-func (m *MockAPIClient) NetworkCreate(arg0 context.Context, arg1 string, arg2 types.NetworkCreate) (types.NetworkCreateResponse, error) {
+func (m *MockAPIClient) NetworkCreate(arg0 context.Context, arg1 string, arg2 network.CreateOptions) (network.CreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworkCreate", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.NetworkCreateResponse)
+ ret0, _ := ret[0].(network.CreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetworkCreate indicates an expected call of NetworkCreate.
-func (mr *MockAPIClientMockRecorder) NetworkCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkCreate", reflect.TypeOf((*MockAPIClient)(nil).NetworkCreate), arg0, arg1, arg2)
}
@@ -1081,53 +1125,53 @@ func (m *MockAPIClient) NetworkDisconnect(arg0 context.Context, arg1, arg2 strin
}
// NetworkDisconnect indicates an expected call of NetworkDisconnect.
-func (mr *MockAPIClientMockRecorder) NetworkDisconnect(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkDisconnect(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkDisconnect", reflect.TypeOf((*MockAPIClient)(nil).NetworkDisconnect), arg0, arg1, arg2, arg3)
}
// NetworkInspect mocks base method.
-func (m *MockAPIClient) NetworkInspect(arg0 context.Context, arg1 string, arg2 types.NetworkInspectOptions) (types.NetworkResource, error) {
+func (m *MockAPIClient) NetworkInspect(arg0 context.Context, arg1 string, arg2 network.InspectOptions) (network.Inspect, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworkInspect", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.NetworkResource)
+ ret0, _ := ret[0].(network.Inspect)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetworkInspect indicates an expected call of NetworkInspect.
-func (mr *MockAPIClientMockRecorder) NetworkInspect(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkInspect(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkInspect", reflect.TypeOf((*MockAPIClient)(nil).NetworkInspect), arg0, arg1, arg2)
}
// NetworkInspectWithRaw mocks base method.
-func (m *MockAPIClient) NetworkInspectWithRaw(arg0 context.Context, arg1 string, arg2 types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
+func (m *MockAPIClient) NetworkInspectWithRaw(arg0 context.Context, arg1 string, arg2 network.InspectOptions) (network.Inspect, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworkInspectWithRaw", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.NetworkResource)
+ ret0, _ := ret[0].(network.Inspect)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// NetworkInspectWithRaw indicates an expected call of NetworkInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) NetworkInspectWithRaw(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkInspectWithRaw(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).NetworkInspectWithRaw), arg0, arg1, arg2)
}
// NetworkList mocks base method.
-func (m *MockAPIClient) NetworkList(arg0 context.Context, arg1 types.NetworkListOptions) ([]types.NetworkResource, error) {
+func (m *MockAPIClient) NetworkList(arg0 context.Context, arg1 network.ListOptions) ([]network.Inspect, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworkList", arg0, arg1)
- ret0, _ := ret[0].([]types.NetworkResource)
+ ret0, _ := ret[0].([]network.Inspect)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetworkList indicates an expected call of NetworkList.
-func (mr *MockAPIClientMockRecorder) NetworkList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkList", reflect.TypeOf((*MockAPIClient)(nil).NetworkList), arg0, arg1)
}
@@ -1141,22 +1185,22 @@ func (m *MockAPIClient) NetworkRemove(arg0 context.Context, arg1 string) error {
}
// NetworkRemove indicates an expected call of NetworkRemove.
-func (mr *MockAPIClientMockRecorder) NetworkRemove(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworkRemove(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkRemove", reflect.TypeOf((*MockAPIClient)(nil).NetworkRemove), arg0, arg1)
}
// NetworksPrune mocks base method.
-func (m *MockAPIClient) NetworksPrune(arg0 context.Context, arg1 filters.Args) (types.NetworksPruneReport, error) {
+func (m *MockAPIClient) NetworksPrune(arg0 context.Context, arg1 filters.Args) (network.PruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworksPrune", arg0, arg1)
- ret0, _ := ret[0].(types.NetworksPruneReport)
+ ret0, _ := ret[0].(network.PruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetworksPrune indicates an expected call of NetworksPrune.
-func (mr *MockAPIClientMockRecorder) NetworksPrune(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NetworksPrune(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworksPrune", reflect.TypeOf((*MockAPIClient)(nil).NetworksPrune), arg0, arg1)
}
@@ -1172,13 +1216,13 @@ func (m *MockAPIClient) NodeInspectWithRaw(arg0 context.Context, arg1 string) (s
}
// NodeInspectWithRaw indicates an expected call of NodeInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) NodeInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NodeInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).NodeInspectWithRaw), arg0, arg1)
}
// NodeList mocks base method.
-func (m *MockAPIClient) NodeList(arg0 context.Context, arg1 types.NodeListOptions) ([]swarm.Node, error) {
+func (m *MockAPIClient) NodeList(arg0 context.Context, arg1 swarm.NodeListOptions) ([]swarm.Node, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Node)
@@ -1187,13 +1231,13 @@ func (m *MockAPIClient) NodeList(arg0 context.Context, arg1 types.NodeListOption
}
// NodeList indicates an expected call of NodeList.
-func (mr *MockAPIClientMockRecorder) NodeList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NodeList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeList", reflect.TypeOf((*MockAPIClient)(nil).NodeList), arg0, arg1)
}
// NodeRemove mocks base method.
-func (m *MockAPIClient) NodeRemove(arg0 context.Context, arg1 string, arg2 types.NodeRemoveOptions) error {
+func (m *MockAPIClient) NodeRemove(arg0 context.Context, arg1 string, arg2 swarm.NodeRemoveOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NodeRemove", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
@@ -1201,7 +1245,7 @@ func (m *MockAPIClient) NodeRemove(arg0 context.Context, arg1 string, arg2 types
}
// NodeRemove indicates an expected call of NodeRemove.
-func (mr *MockAPIClientMockRecorder) NodeRemove(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NodeRemove(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeRemove", reflect.TypeOf((*MockAPIClient)(nil).NodeRemove), arg0, arg1, arg2)
}
@@ -1215,7 +1259,7 @@ func (m *MockAPIClient) NodeUpdate(arg0 context.Context, arg1 string, arg2 swarm
}
// NodeUpdate indicates an expected call of NodeUpdate.
-func (mr *MockAPIClientMockRecorder) NodeUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) NodeUpdate(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUpdate", reflect.TypeOf((*MockAPIClient)(nil).NodeUpdate), arg0, arg1, arg2, arg3)
}
@@ -1230,7 +1274,7 @@ func (m *MockAPIClient) Ping(arg0 context.Context) (types.Ping, error) {
}
// Ping indicates an expected call of Ping.
-func (mr *MockAPIClientMockRecorder) Ping(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) Ping(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockAPIClient)(nil).Ping), arg0)
}
@@ -1244,7 +1288,7 @@ func (m *MockAPIClient) PluginCreate(arg0 context.Context, arg1 io.Reader, arg2
}
// PluginCreate indicates an expected call of PluginCreate.
-func (mr *MockAPIClientMockRecorder) PluginCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginCreate", reflect.TypeOf((*MockAPIClient)(nil).PluginCreate), arg0, arg1, arg2)
}
@@ -1258,7 +1302,7 @@ func (m *MockAPIClient) PluginDisable(arg0 context.Context, arg1 string, arg2 ty
}
// PluginDisable indicates an expected call of PluginDisable.
-func (mr *MockAPIClientMockRecorder) PluginDisable(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginDisable(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginDisable", reflect.TypeOf((*MockAPIClient)(nil).PluginDisable), arg0, arg1, arg2)
}
@@ -1272,7 +1316,7 @@ func (m *MockAPIClient) PluginEnable(arg0 context.Context, arg1 string, arg2 typ
}
// PluginEnable indicates an expected call of PluginEnable.
-func (mr *MockAPIClientMockRecorder) PluginEnable(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginEnable(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginEnable", reflect.TypeOf((*MockAPIClient)(nil).PluginEnable), arg0, arg1, arg2)
}
@@ -1288,7 +1332,7 @@ func (m *MockAPIClient) PluginInspectWithRaw(arg0 context.Context, arg1 string)
}
// PluginInspectWithRaw indicates an expected call of PluginInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) PluginInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).PluginInspectWithRaw), arg0, arg1)
}
@@ -1303,7 +1347,7 @@ func (m *MockAPIClient) PluginInstall(arg0 context.Context, arg1 string, arg2 ty
}
// PluginInstall indicates an expected call of PluginInstall.
-func (mr *MockAPIClientMockRecorder) PluginInstall(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginInstall(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginInstall", reflect.TypeOf((*MockAPIClient)(nil).PluginInstall), arg0, arg1, arg2)
}
@@ -1318,7 +1362,7 @@ func (m *MockAPIClient) PluginList(arg0 context.Context, arg1 filters.Args) (typ
}
// PluginList indicates an expected call of PluginList.
-func (mr *MockAPIClientMockRecorder) PluginList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginList", reflect.TypeOf((*MockAPIClient)(nil).PluginList), arg0, arg1)
}
@@ -1333,7 +1377,7 @@ func (m *MockAPIClient) PluginPush(arg0 context.Context, arg1, arg2 string) (io.
}
// PluginPush indicates an expected call of PluginPush.
-func (mr *MockAPIClientMockRecorder) PluginPush(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginPush(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginPush", reflect.TypeOf((*MockAPIClient)(nil).PluginPush), arg0, arg1, arg2)
}
@@ -1347,7 +1391,7 @@ func (m *MockAPIClient) PluginRemove(arg0 context.Context, arg1 string, arg2 typ
}
// PluginRemove indicates an expected call of PluginRemove.
-func (mr *MockAPIClientMockRecorder) PluginRemove(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginRemove(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginRemove", reflect.TypeOf((*MockAPIClient)(nil).PluginRemove), arg0, arg1, arg2)
}
@@ -1361,7 +1405,7 @@ func (m *MockAPIClient) PluginSet(arg0 context.Context, arg1 string, arg2 []stri
}
// PluginSet indicates an expected call of PluginSet.
-func (mr *MockAPIClientMockRecorder) PluginSet(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginSet(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginSet", reflect.TypeOf((*MockAPIClient)(nil).PluginSet), arg0, arg1, arg2)
}
@@ -1376,13 +1420,13 @@ func (m *MockAPIClient) PluginUpgrade(arg0 context.Context, arg1 string, arg2 ty
}
// PluginUpgrade indicates an expected call of PluginUpgrade.
-func (mr *MockAPIClientMockRecorder) PluginUpgrade(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) PluginUpgrade(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginUpgrade", reflect.TypeOf((*MockAPIClient)(nil).PluginUpgrade), arg0, arg1, arg2)
}
// RegistryLogin mocks base method.
-func (m *MockAPIClient) RegistryLogin(arg0 context.Context, arg1 types.AuthConfig) (registry.AuthenticateOKBody, error) {
+func (m *MockAPIClient) RegistryLogin(arg0 context.Context, arg1 registry.AuthConfig) (registry.AuthenticateOKBody, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegistryLogin", arg0, arg1)
ret0, _ := ret[0].(registry.AuthenticateOKBody)
@@ -1391,22 +1435,22 @@ func (m *MockAPIClient) RegistryLogin(arg0 context.Context, arg1 types.AuthConfi
}
// RegistryLogin indicates an expected call of RegistryLogin.
-func (mr *MockAPIClientMockRecorder) RegistryLogin(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) RegistryLogin(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockAPIClient)(nil).RegistryLogin), arg0, arg1)
}
// SecretCreate mocks base method.
-func (m *MockAPIClient) SecretCreate(arg0 context.Context, arg1 swarm.SecretSpec) (types.SecretCreateResponse, error) {
+func (m *MockAPIClient) SecretCreate(arg0 context.Context, arg1 swarm.SecretSpec) (swarm.SecretCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SecretCreate", arg0, arg1)
- ret0, _ := ret[0].(types.SecretCreateResponse)
+ ret0, _ := ret[0].(swarm.SecretCreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SecretCreate indicates an expected call of SecretCreate.
-func (mr *MockAPIClientMockRecorder) SecretCreate(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SecretCreate(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecretCreate", reflect.TypeOf((*MockAPIClient)(nil).SecretCreate), arg0, arg1)
}
@@ -1422,13 +1466,13 @@ func (m *MockAPIClient) SecretInspectWithRaw(arg0 context.Context, arg1 string)
}
// SecretInspectWithRaw indicates an expected call of SecretInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) SecretInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SecretInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecretInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).SecretInspectWithRaw), arg0, arg1)
}
// SecretList mocks base method.
-func (m *MockAPIClient) SecretList(arg0 context.Context, arg1 types.SecretListOptions) ([]swarm.Secret, error) {
+func (m *MockAPIClient) SecretList(arg0 context.Context, arg1 swarm.SecretListOptions) ([]swarm.Secret, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SecretList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Secret)
@@ -1437,7 +1481,7 @@ func (m *MockAPIClient) SecretList(arg0 context.Context, arg1 types.SecretListOp
}
// SecretList indicates an expected call of SecretList.
-func (mr *MockAPIClientMockRecorder) SecretList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SecretList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecretList", reflect.TypeOf((*MockAPIClient)(nil).SecretList), arg0, arg1)
}
@@ -1451,7 +1495,7 @@ func (m *MockAPIClient) SecretRemove(arg0 context.Context, arg1 string) error {
}
// SecretRemove indicates an expected call of SecretRemove.
-func (mr *MockAPIClientMockRecorder) SecretRemove(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SecretRemove(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecretRemove", reflect.TypeOf((*MockAPIClient)(nil).SecretRemove), arg0, arg1)
}
@@ -1465,7 +1509,7 @@ func (m *MockAPIClient) SecretUpdate(arg0 context.Context, arg1 string, arg2 swa
}
// SecretUpdate indicates an expected call of SecretUpdate.
-func (mr *MockAPIClientMockRecorder) SecretUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SecretUpdate(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SecretUpdate", reflect.TypeOf((*MockAPIClient)(nil).SecretUpdate), arg0, arg1, arg2, arg3)
}
@@ -1480,28 +1524,28 @@ func (m *MockAPIClient) ServerVersion(arg0 context.Context) (types.Version, erro
}
// ServerVersion indicates an expected call of ServerVersion.
-func (mr *MockAPIClientMockRecorder) ServerVersion(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServerVersion(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerVersion", reflect.TypeOf((*MockAPIClient)(nil).ServerVersion), arg0)
}
// ServiceCreate mocks base method.
-func (m *MockAPIClient) ServiceCreate(arg0 context.Context, arg1 swarm.ServiceSpec, arg2 types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
+func (m *MockAPIClient) ServiceCreate(arg0 context.Context, arg1 swarm.ServiceSpec, arg2 swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceCreate", arg0, arg1, arg2)
- ret0, _ := ret[0].(types.ServiceCreateResponse)
+ ret0, _ := ret[0].(swarm.ServiceCreateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ServiceCreate indicates an expected call of ServiceCreate.
-func (mr *MockAPIClientMockRecorder) ServiceCreate(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceCreate(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceCreate", reflect.TypeOf((*MockAPIClient)(nil).ServiceCreate), arg0, arg1, arg2)
}
// ServiceInspectWithRaw mocks base method.
-func (m *MockAPIClient) ServiceInspectWithRaw(arg0 context.Context, arg1 string, arg2 types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+func (m *MockAPIClient) ServiceInspectWithRaw(arg0 context.Context, arg1 string, arg2 swarm.ServiceInspectOptions) (swarm.Service, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceInspectWithRaw", arg0, arg1, arg2)
ret0, _ := ret[0].(swarm.Service)
@@ -1511,13 +1555,13 @@ func (m *MockAPIClient) ServiceInspectWithRaw(arg0 context.Context, arg1 string,
}
// ServiceInspectWithRaw indicates an expected call of ServiceInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) ServiceInspectWithRaw(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceInspectWithRaw(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).ServiceInspectWithRaw), arg0, arg1, arg2)
}
// ServiceList mocks base method.
-func (m *MockAPIClient) ServiceList(arg0 context.Context, arg1 types.ServiceListOptions) ([]swarm.Service, error) {
+func (m *MockAPIClient) ServiceList(arg0 context.Context, arg1 swarm.ServiceListOptions) ([]swarm.Service, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Service)
@@ -1526,13 +1570,13 @@ func (m *MockAPIClient) ServiceList(arg0 context.Context, arg1 types.ServiceList
}
// ServiceList indicates an expected call of ServiceList.
-func (mr *MockAPIClientMockRecorder) ServiceList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceList", reflect.TypeOf((*MockAPIClient)(nil).ServiceList), arg0, arg1)
}
// ServiceLogs mocks base method.
-func (m *MockAPIClient) ServiceLogs(arg0 context.Context, arg1 string, arg2 types.ContainerLogsOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) ServiceLogs(arg0 context.Context, arg1 string, arg2 container.LogsOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceLogs", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -1541,7 +1585,7 @@ func (m *MockAPIClient) ServiceLogs(arg0 context.Context, arg1 string, arg2 type
}
// ServiceLogs indicates an expected call of ServiceLogs.
-func (mr *MockAPIClientMockRecorder) ServiceLogs(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceLogs(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceLogs", reflect.TypeOf((*MockAPIClient)(nil).ServiceLogs), arg0, arg1, arg2)
}
@@ -1555,37 +1599,37 @@ func (m *MockAPIClient) ServiceRemove(arg0 context.Context, arg1 string) error {
}
// ServiceRemove indicates an expected call of ServiceRemove.
-func (mr *MockAPIClientMockRecorder) ServiceRemove(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceRemove(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceRemove", reflect.TypeOf((*MockAPIClient)(nil).ServiceRemove), arg0, arg1)
}
// ServiceUpdate mocks base method.
-func (m *MockAPIClient) ServiceUpdate(arg0 context.Context, arg1 string, arg2 swarm.Version, arg3 swarm.ServiceSpec, arg4 types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
+func (m *MockAPIClient) ServiceUpdate(arg0 context.Context, arg1 string, arg2 swarm.Version, arg3 swarm.ServiceSpec, arg4 swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceUpdate", arg0, arg1, arg2, arg3, arg4)
- ret0, _ := ret[0].(types.ServiceUpdateResponse)
+ ret0, _ := ret[0].(swarm.ServiceUpdateResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ServiceUpdate indicates an expected call of ServiceUpdate.
-func (mr *MockAPIClientMockRecorder) ServiceUpdate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) ServiceUpdate(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceUpdate", reflect.TypeOf((*MockAPIClient)(nil).ServiceUpdate), arg0, arg1, arg2, arg3, arg4)
}
// SwarmGetUnlockKey mocks base method.
-func (m *MockAPIClient) SwarmGetUnlockKey(arg0 context.Context) (types.SwarmUnlockKeyResponse, error) {
+func (m *MockAPIClient) SwarmGetUnlockKey(arg0 context.Context) (swarm.UnlockKeyResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SwarmGetUnlockKey", arg0)
- ret0, _ := ret[0].(types.SwarmUnlockKeyResponse)
+ ret0, _ := ret[0].(swarm.UnlockKeyResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SwarmGetUnlockKey indicates an expected call of SwarmGetUnlockKey.
-func (mr *MockAPIClientMockRecorder) SwarmGetUnlockKey(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmGetUnlockKey(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmGetUnlockKey", reflect.TypeOf((*MockAPIClient)(nil).SwarmGetUnlockKey), arg0)
}
@@ -1600,7 +1644,7 @@ func (m *MockAPIClient) SwarmInit(arg0 context.Context, arg1 swarm.InitRequest)
}
// SwarmInit indicates an expected call of SwarmInit.
-func (mr *MockAPIClientMockRecorder) SwarmInit(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmInit(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmInit", reflect.TypeOf((*MockAPIClient)(nil).SwarmInit), arg0, arg1)
}
@@ -1615,7 +1659,7 @@ func (m *MockAPIClient) SwarmInspect(arg0 context.Context) (swarm.Swarm, error)
}
// SwarmInspect indicates an expected call of SwarmInspect.
-func (mr *MockAPIClientMockRecorder) SwarmInspect(arg0 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmInspect(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmInspect", reflect.TypeOf((*MockAPIClient)(nil).SwarmInspect), arg0)
}
@@ -1629,7 +1673,7 @@ func (m *MockAPIClient) SwarmJoin(arg0 context.Context, arg1 swarm.JoinRequest)
}
// SwarmJoin indicates an expected call of SwarmJoin.
-func (mr *MockAPIClientMockRecorder) SwarmJoin(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmJoin(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmJoin", reflect.TypeOf((*MockAPIClient)(nil).SwarmJoin), arg0, arg1)
}
@@ -1643,7 +1687,7 @@ func (m *MockAPIClient) SwarmLeave(arg0 context.Context, arg1 bool) error {
}
// SwarmLeave indicates an expected call of SwarmLeave.
-func (mr *MockAPIClientMockRecorder) SwarmLeave(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmLeave(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmLeave", reflect.TypeOf((*MockAPIClient)(nil).SwarmLeave), arg0, arg1)
}
@@ -1657,7 +1701,7 @@ func (m *MockAPIClient) SwarmUnlock(arg0 context.Context, arg1 swarm.UnlockReque
}
// SwarmUnlock indicates an expected call of SwarmUnlock.
-func (mr *MockAPIClientMockRecorder) SwarmUnlock(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmUnlock(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmUnlock", reflect.TypeOf((*MockAPIClient)(nil).SwarmUnlock), arg0, arg1)
}
@@ -1671,7 +1715,7 @@ func (m *MockAPIClient) SwarmUpdate(arg0 context.Context, arg1 swarm.Version, ar
}
// SwarmUpdate indicates an expected call of SwarmUpdate.
-func (mr *MockAPIClientMockRecorder) SwarmUpdate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) SwarmUpdate(arg0, arg1, arg2, arg3 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwarmUpdate", reflect.TypeOf((*MockAPIClient)(nil).SwarmUpdate), arg0, arg1, arg2, arg3)
}
@@ -1687,13 +1731,13 @@ func (m *MockAPIClient) TaskInspectWithRaw(arg0 context.Context, arg1 string) (s
}
// TaskInspectWithRaw indicates an expected call of TaskInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) TaskInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) TaskInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).TaskInspectWithRaw), arg0, arg1)
}
// TaskList mocks base method.
-func (m *MockAPIClient) TaskList(arg0 context.Context, arg1 types.TaskListOptions) ([]swarm.Task, error) {
+func (m *MockAPIClient) TaskList(arg0 context.Context, arg1 swarm.TaskListOptions) ([]swarm.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TaskList", arg0, arg1)
ret0, _ := ret[0].([]swarm.Task)
@@ -1702,13 +1746,13 @@ func (m *MockAPIClient) TaskList(arg0 context.Context, arg1 types.TaskListOption
}
// TaskList indicates an expected call of TaskList.
-func (mr *MockAPIClientMockRecorder) TaskList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) TaskList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskList", reflect.TypeOf((*MockAPIClient)(nil).TaskList), arg0, arg1)
}
// TaskLogs mocks base method.
-func (m *MockAPIClient) TaskLogs(arg0 context.Context, arg1 string, arg2 types.ContainerLogsOptions) (io.ReadCloser, error) {
+func (m *MockAPIClient) TaskLogs(arg0 context.Context, arg1 string, arg2 container.LogsOptions) (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TaskLogs", arg0, arg1, arg2)
ret0, _ := ret[0].(io.ReadCloser)
@@ -1717,68 +1761,68 @@ func (m *MockAPIClient) TaskLogs(arg0 context.Context, arg1 string, arg2 types.C
}
// TaskLogs indicates an expected call of TaskLogs.
-func (mr *MockAPIClientMockRecorder) TaskLogs(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) TaskLogs(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskLogs", reflect.TypeOf((*MockAPIClient)(nil).TaskLogs), arg0, arg1, arg2)
}
// VolumeCreate mocks base method.
-func (m *MockAPIClient) VolumeCreate(arg0 context.Context, arg1 volume.VolumeCreateBody) (types.Volume, error) {
+func (m *MockAPIClient) VolumeCreate(arg0 context.Context, arg1 volume.CreateOptions) (volume.Volume, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeCreate", arg0, arg1)
- ret0, _ := ret[0].(types.Volume)
+ ret0, _ := ret[0].(volume.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VolumeCreate indicates an expected call of VolumeCreate.
-func (mr *MockAPIClientMockRecorder) VolumeCreate(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumeCreate(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeCreate", reflect.TypeOf((*MockAPIClient)(nil).VolumeCreate), arg0, arg1)
}
// VolumeInspect mocks base method.
-func (m *MockAPIClient) VolumeInspect(arg0 context.Context, arg1 string) (types.Volume, error) {
+func (m *MockAPIClient) VolumeInspect(arg0 context.Context, arg1 string) (volume.Volume, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeInspect", arg0, arg1)
- ret0, _ := ret[0].(types.Volume)
+ ret0, _ := ret[0].(volume.Volume)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VolumeInspect indicates an expected call of VolumeInspect.
-func (mr *MockAPIClientMockRecorder) VolumeInspect(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumeInspect(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeInspect", reflect.TypeOf((*MockAPIClient)(nil).VolumeInspect), arg0, arg1)
}
// VolumeInspectWithRaw mocks base method.
-func (m *MockAPIClient) VolumeInspectWithRaw(arg0 context.Context, arg1 string) (types.Volume, []byte, error) {
+func (m *MockAPIClient) VolumeInspectWithRaw(arg0 context.Context, arg1 string) (volume.Volume, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeInspectWithRaw", arg0, arg1)
- ret0, _ := ret[0].(types.Volume)
+ ret0, _ := ret[0].(volume.Volume)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// VolumeInspectWithRaw indicates an expected call of VolumeInspectWithRaw.
-func (mr *MockAPIClientMockRecorder) VolumeInspectWithRaw(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumeInspectWithRaw(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeInspectWithRaw", reflect.TypeOf((*MockAPIClient)(nil).VolumeInspectWithRaw), arg0, arg1)
}
// VolumeList mocks base method.
-func (m *MockAPIClient) VolumeList(arg0 context.Context, arg1 filters.Args) (volume.VolumeListOKBody, error) {
+func (m *MockAPIClient) VolumeList(arg0 context.Context, arg1 volume.ListOptions) (volume.ListResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumeList", arg0, arg1)
- ret0, _ := ret[0].(volume.VolumeListOKBody)
+ ret0, _ := ret[0].(volume.ListResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VolumeList indicates an expected call of VolumeList.
-func (mr *MockAPIClientMockRecorder) VolumeList(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumeList(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeList", reflect.TypeOf((*MockAPIClient)(nil).VolumeList), arg0, arg1)
}
@@ -1792,22 +1836,36 @@ func (m *MockAPIClient) VolumeRemove(arg0 context.Context, arg1 string, arg2 boo
}
// VolumeRemove indicates an expected call of VolumeRemove.
-func (mr *MockAPIClientMockRecorder) VolumeRemove(arg0, arg1, arg2 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumeRemove(arg0, arg1, arg2 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeRemove", reflect.TypeOf((*MockAPIClient)(nil).VolumeRemove), arg0, arg1, arg2)
}
+// VolumeUpdate mocks base method.
+func (m *MockAPIClient) VolumeUpdate(arg0 context.Context, arg1 string, arg2 swarm.Version, arg3 volume.UpdateOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "VolumeUpdate", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// VolumeUpdate indicates an expected call of VolumeUpdate.
+func (mr *MockAPIClientMockRecorder) VolumeUpdate(arg0, arg1, arg2, arg3 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeUpdate", reflect.TypeOf((*MockAPIClient)(nil).VolumeUpdate), arg0, arg1, arg2, arg3)
+}
+
// VolumesPrune mocks base method.
-func (m *MockAPIClient) VolumesPrune(arg0 context.Context, arg1 filters.Args) (types.VolumesPruneReport, error) {
+func (m *MockAPIClient) VolumesPrune(arg0 context.Context, arg1 filters.Args) (volume.PruneReport, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VolumesPrune", arg0, arg1)
- ret0, _ := ret[0].(types.VolumesPruneReport)
+ ret0, _ := ret[0].(volume.PruneReport)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VolumesPrune indicates an expected call of VolumesPrune.
-func (mr *MockAPIClientMockRecorder) VolumesPrune(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockAPIClientMockRecorder) VolumesPrune(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumesPrune", reflect.TypeOf((*MockAPIClient)(nil).VolumesPrune), arg0, arg1)
}
diff --git a/pkg/mocks/mock_docker_cli.go b/pkg/mocks/mock_docker_cli.go
index 5162883d5db..663c57ee928 100644
--- a/pkg/mocks/mock_docker_cli.go
+++ b/pkg/mocks/mock_docker_cli.go
@@ -1,24 +1,27 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/docker/cli/cli/command (interfaces: Cli)
+//
+// Generated by this command:
+//
+// mockgen -destination pkg/mocks/mock_docker_cli.go -package mocks github.com/docker/cli/cli/command Cli
+//
// Package mocks is a generated GoMock package.
package mocks
import (
- io "io"
reflect "reflect"
command "github.com/docker/cli/cli/command"
configfile "github.com/docker/cli/cli/config/configfile"
docker "github.com/docker/cli/cli/context/docker"
store "github.com/docker/cli/cli/context/store"
- store0 "github.com/docker/cli/cli/manifest/store"
- client "github.com/docker/cli/cli/registry/client"
streams "github.com/docker/cli/cli/streams"
- trust "github.com/docker/cli/cli/trust"
- client0 "github.com/docker/docker/client"
- gomock "github.com/golang/mock/gomock"
- client1 "github.com/theupdateframework/notary/client"
+ client "github.com/docker/docker/client"
+ metric "go.opentelemetry.io/otel/metric"
+ resource "go.opentelemetry.io/otel/sdk/resource"
+ trace "go.opentelemetry.io/otel/trace"
+ gomock "go.uber.org/mock/gomock"
)
// MockCli is a mock of Cli interface.
@@ -45,9 +48,9 @@ func (m *MockCli) EXPECT() *MockCliMockRecorder {
}
// Apply mocks base method.
-func (m *MockCli) Apply(arg0 ...command.DockerCliOption) error {
+func (m *MockCli) Apply(arg0 ...command.CLIOption) error {
m.ctrl.T.Helper()
- varargs := []interface{}{}
+ varargs := []any{}
for _, a := range arg0 {
varargs = append(varargs, a)
}
@@ -57,7 +60,7 @@ func (m *MockCli) Apply(arg0 ...command.DockerCliOption) error {
}
// Apply indicates an expected call of Apply.
-func (mr *MockCliMockRecorder) Apply(arg0 ...interface{}) *gomock.Call {
+func (mr *MockCliMockRecorder) Apply(arg0 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockCli)(nil).Apply), arg0...)
}
@@ -78,10 +81,10 @@ func (mr *MockCliMockRecorder) BuildKitEnabled() *gomock.Call {
}
// Client mocks base method.
-func (m *MockCli) Client() client0.APIClient {
+func (m *MockCli) Client() client.APIClient {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Client")
- ret0, _ := ret[0].(client0.APIClient)
+ ret0, _ := ret[0].(client.APIClient)
return ret0
}
@@ -91,20 +94,6 @@ func (mr *MockCliMockRecorder) Client() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Client", reflect.TypeOf((*MockCli)(nil).Client))
}
-// ClientInfo mocks base method.
-func (m *MockCli) ClientInfo() command.ClientInfo {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ClientInfo")
- ret0, _ := ret[0].(command.ClientInfo)
- return ret0
-}
-
-// ClientInfo indicates an expected call of ClientInfo.
-func (mr *MockCliMockRecorder) ClientInfo() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientInfo", reflect.TypeOf((*MockCli)(nil).ClientInfo))
-}
-
// ConfigFile mocks base method.
func (m *MockCli) ConfigFile() *configfile.ConfigFile {
m.ctrl.T.Helper()
@@ -119,20 +108,6 @@ func (mr *MockCliMockRecorder) ConfigFile() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigFile", reflect.TypeOf((*MockCli)(nil).ConfigFile))
}
-// ContentTrustEnabled mocks base method.
-func (m *MockCli) ContentTrustEnabled() bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ContentTrustEnabled")
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// ContentTrustEnabled indicates an expected call of ContentTrustEnabled.
-func (mr *MockCliMockRecorder) ContentTrustEnabled() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContentTrustEnabled", reflect.TypeOf((*MockCli)(nil).ContentTrustEnabled))
-}
-
// ContextStore mocks base method.
func (m *MockCli) ContextStore() store.Store {
m.ctrl.T.Helper()
@@ -161,18 +136,18 @@ func (mr *MockCliMockRecorder) CurrentContext() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentContext", reflect.TypeOf((*MockCli)(nil).CurrentContext))
}
-// DefaultVersion mocks base method.
-func (m *MockCli) DefaultVersion() string {
+// CurrentVersion mocks base method.
+func (m *MockCli) CurrentVersion() string {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DefaultVersion")
+ ret := m.ctrl.Call(m, "CurrentVersion")
ret0, _ := ret[0].(string)
return ret0
}
-// DefaultVersion indicates an expected call of DefaultVersion.
-func (mr *MockCliMockRecorder) DefaultVersion() *gomock.Call {
+// CurrentVersion indicates an expected call of CurrentVersion.
+func (mr *MockCliMockRecorder) CurrentVersion() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultVersion", reflect.TypeOf((*MockCli)(nil).DefaultVersion))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentVersion", reflect.TypeOf((*MockCli)(nil).CurrentVersion))
}
// DockerEndpoint mocks base method.
@@ -190,10 +165,10 @@ func (mr *MockCliMockRecorder) DockerEndpoint() *gomock.Call {
}
// Err mocks base method.
-func (m *MockCli) Err() io.Writer {
+func (m *MockCli) Err() *streams.Out {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Err")
- ret0, _ := ret[0].(io.Writer)
+ ret0, _ := ret[0].(*streams.Out)
return ret0
}
@@ -217,33 +192,18 @@ func (mr *MockCliMockRecorder) In() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "In", reflect.TypeOf((*MockCli)(nil).In))
}
-// ManifestStore mocks base method.
-func (m *MockCli) ManifestStore() store0.Store {
+// MeterProvider mocks base method.
+func (m *MockCli) MeterProvider() metric.MeterProvider {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ManifestStore")
- ret0, _ := ret[0].(store0.Store)
+ ret := m.ctrl.Call(m, "MeterProvider")
+ ret0, _ := ret[0].(metric.MeterProvider)
return ret0
}
-// ManifestStore indicates an expected call of ManifestStore.
-func (mr *MockCliMockRecorder) ManifestStore() *gomock.Call {
+// MeterProvider indicates an expected call of MeterProvider.
+func (mr *MockCliMockRecorder) MeterProvider() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManifestStore", reflect.TypeOf((*MockCli)(nil).ManifestStore))
-}
-
-// NotaryClient mocks base method.
-func (m *MockCli) NotaryClient(arg0 trust.ImageRefAndAuth, arg1 []string) (client1.Repository, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "NotaryClient", arg0, arg1)
- ret0, _ := ret[0].(client1.Repository)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// NotaryClient indicates an expected call of NotaryClient.
-func (mr *MockCliMockRecorder) NotaryClient(arg0, arg1 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotaryClient", reflect.TypeOf((*MockCli)(nil).NotaryClient), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MeterProvider", reflect.TypeOf((*MockCli)(nil).MeterProvider))
}
// Out mocks base method.
@@ -260,18 +220,18 @@ func (mr *MockCliMockRecorder) Out() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Out", reflect.TypeOf((*MockCli)(nil).Out))
}
-// RegistryClient mocks base method.
-func (m *MockCli) RegistryClient(arg0 bool) client.RegistryClient {
+// Resource mocks base method.
+func (m *MockCli) Resource() *resource.Resource {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "RegistryClient", arg0)
- ret0, _ := ret[0].(client.RegistryClient)
+ ret := m.ctrl.Call(m, "Resource")
+ ret0, _ := ret[0].(*resource.Resource)
return ret0
}
-// RegistryClient indicates an expected call of RegistryClient.
-func (mr *MockCliMockRecorder) RegistryClient(arg0 interface{}) *gomock.Call {
+// Resource indicates an expected call of Resource.
+func (mr *MockCliMockRecorder) Resource() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryClient", reflect.TypeOf((*MockCli)(nil).RegistryClient), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resource", reflect.TypeOf((*MockCli)(nil).Resource))
}
// ServerInfo mocks base method.
@@ -295,7 +255,21 @@ func (m *MockCli) SetIn(arg0 *streams.In) {
}
// SetIn indicates an expected call of SetIn.
-func (mr *MockCliMockRecorder) SetIn(arg0 interface{}) *gomock.Call {
+func (mr *MockCliMockRecorder) SetIn(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIn", reflect.TypeOf((*MockCli)(nil).SetIn), arg0)
}
+
+// TracerProvider mocks base method.
+func (m *MockCli) TracerProvider() trace.TracerProvider {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TracerProvider")
+ ret0, _ := ret[0].(trace.TracerProvider)
+ return ret0
+}
+
+// TracerProvider indicates an expected call of TracerProvider.
+func (mr *MockCliMockRecorder) TracerProvider() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TracerProvider", reflect.TypeOf((*MockCli)(nil).TracerProvider))
+}
diff --git a/pkg/mocks/mock_docker_compose_api.go b/pkg/mocks/mock_docker_compose_api.go
new file mode 100644
index 00000000000..db6ddb92ec4
--- /dev/null
+++ b/pkg/mocks/mock_docker_compose_api.go
@@ -0,0 +1,590 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: ./pkg/api/api.go
+//
+// Generated by this command:
+//
+// mockgen -destination pkg/mocks/mock_docker_compose_api.go -package mocks -source=./pkg/api/api.go Service
+//
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+ context "context"
+ reflect "reflect"
+
+ types "github.com/compose-spec/compose-go/v2/types"
+ api "github.com/docker/compose/v5/pkg/api"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockCompose is a mock of Compose interface.
+type MockCompose struct {
+ ctrl *gomock.Controller
+ recorder *MockComposeMockRecorder
+}
+
+// MockComposeMockRecorder is the mock recorder for MockCompose.
+type MockComposeMockRecorder struct {
+ mock *MockCompose
+}
+
+// NewMockCompose creates a new mock instance.
+func NewMockCompose(ctrl *gomock.Controller) *MockCompose {
+ mock := &MockCompose{ctrl: ctrl}
+ mock.recorder = &MockComposeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCompose) EXPECT() *MockComposeMockRecorder {
+ return m.recorder
+}
+
+// Attach mocks base method.
+func (m *MockCompose) Attach(ctx context.Context, projectName string, options api.AttachOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Attach", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Attach indicates an expected call of Attach.
+func (mr *MockComposeMockRecorder) Attach(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockCompose)(nil).Attach), ctx, projectName, options)
+}
+
+// Build mocks base method.
+func (m *MockCompose) Build(ctx context.Context, project *types.Project, options api.BuildOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Build", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Build indicates an expected call of Build.
+func (mr *MockComposeMockRecorder) Build(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockCompose)(nil).Build), ctx, project, options)
+}
+
+// Commit mocks base method.
+func (m *MockCompose) Commit(ctx context.Context, projectName string, options api.CommitOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Commit", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Commit indicates an expected call of Commit.
+func (mr *MockComposeMockRecorder) Commit(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockCompose)(nil).Commit), ctx, projectName, options)
+}
+
+// Copy mocks base method.
+func (m *MockCompose) Copy(ctx context.Context, projectName string, options api.CopyOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Copy", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Copy indicates an expected call of Copy.
+func (mr *MockComposeMockRecorder) Copy(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Copy", reflect.TypeOf((*MockCompose)(nil).Copy), ctx, projectName, options)
+}
+
+// Create mocks base method.
+func (m *MockCompose) Create(ctx context.Context, project *types.Project, options api.CreateOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Create", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Create indicates an expected call of Create.
+func (mr *MockComposeMockRecorder) Create(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockCompose)(nil).Create), ctx, project, options)
+}
+
+// Down mocks base method.
+func (m *MockCompose) Down(ctx context.Context, projectName string, options api.DownOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Down", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Down indicates an expected call of Down.
+func (mr *MockComposeMockRecorder) Down(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Down", reflect.TypeOf((*MockCompose)(nil).Down), ctx, projectName, options)
+}
+
+// Events mocks base method.
+func (m *MockCompose) Events(ctx context.Context, projectName string, options api.EventsOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Events", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Events indicates an expected call of Events.
+func (mr *MockComposeMockRecorder) Events(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Events", reflect.TypeOf((*MockCompose)(nil).Events), ctx, projectName, options)
+}
+
+// Exec mocks base method.
+func (m *MockCompose) Exec(ctx context.Context, projectName string, options api.RunOptions) (int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Exec", ctx, projectName, options)
+ ret0, _ := ret[0].(int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Exec indicates an expected call of Exec.
+func (mr *MockComposeMockRecorder) Exec(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockCompose)(nil).Exec), ctx, projectName, options)
+}
+
+// Export mocks base method.
+func (m *MockCompose) Export(ctx context.Context, projectName string, options api.ExportOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Export", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Export indicates an expected call of Export.
+func (mr *MockComposeMockRecorder) Export(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Export", reflect.TypeOf((*MockCompose)(nil).Export), ctx, projectName, options)
+}
+
+// Generate mocks base method.
+func (m *MockCompose) Generate(ctx context.Context, options api.GenerateOptions) (*types.Project, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Generate", ctx, options)
+ ret0, _ := ret[0].(*types.Project)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Generate indicates an expected call of Generate.
+func (mr *MockComposeMockRecorder) Generate(ctx, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generate", reflect.TypeOf((*MockCompose)(nil).Generate), ctx, options)
+}
+
+// Images mocks base method.
+func (m *MockCompose) Images(ctx context.Context, projectName string, options api.ImagesOptions) (map[string]api.ImageSummary, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Images", ctx, projectName, options)
+ ret0, _ := ret[0].(map[string]api.ImageSummary)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Images indicates an expected call of Images.
+func (mr *MockComposeMockRecorder) Images(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Images", reflect.TypeOf((*MockCompose)(nil).Images), ctx, projectName, options)
+}
+
+// Kill mocks base method.
+func (m *MockCompose) Kill(ctx context.Context, projectName string, options api.KillOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Kill", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Kill indicates an expected call of Kill.
+func (mr *MockComposeMockRecorder) Kill(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kill", reflect.TypeOf((*MockCompose)(nil).Kill), ctx, projectName, options)
+}
+
+// List mocks base method.
+func (m *MockCompose) List(ctx context.Context, options api.ListOptions) ([]api.Stack, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "List", ctx, options)
+ ret0, _ := ret[0].([]api.Stack)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// List indicates an expected call of List.
+func (mr *MockComposeMockRecorder) List(ctx, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockCompose)(nil).List), ctx, options)
+}
+
+// LoadProject mocks base method.
+func (m *MockCompose) LoadProject(ctx context.Context, options api.ProjectLoadOptions) (*types.Project, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LoadProject", ctx, options)
+ ret0, _ := ret[0].(*types.Project)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// LoadProject indicates an expected call of LoadProject.
+func (mr *MockComposeMockRecorder) LoadProject(ctx, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadProject", reflect.TypeOf((*MockCompose)(nil).LoadProject), ctx, options)
+}
+
+// Logs mocks base method.
+func (m *MockCompose) Logs(ctx context.Context, projectName string, consumer api.LogConsumer, options api.LogOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Logs", ctx, projectName, consumer, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Logs indicates an expected call of Logs.
+func (mr *MockComposeMockRecorder) Logs(ctx, projectName, consumer, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Logs", reflect.TypeOf((*MockCompose)(nil).Logs), ctx, projectName, consumer, options)
+}
+
+// Pause mocks base method.
+func (m *MockCompose) Pause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Pause", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Pause indicates an expected call of Pause.
+func (mr *MockComposeMockRecorder) Pause(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pause", reflect.TypeOf((*MockCompose)(nil).Pause), ctx, projectName, options)
+}
+
+// Port mocks base method.
+func (m *MockCompose) Port(ctx context.Context, projectName, service string, port uint16, options api.PortOptions) (string, int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Port", ctx, projectName, service, port, options)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(int)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// Port indicates an expected call of Port.
+func (mr *MockComposeMockRecorder) Port(ctx, projectName, service, port, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Port", reflect.TypeOf((*MockCompose)(nil).Port), ctx, projectName, service, port, options)
+}
+
+// Ps mocks base method.
+func (m *MockCompose) Ps(ctx context.Context, projectName string, options api.PsOptions) ([]api.ContainerSummary, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Ps", ctx, projectName, options)
+ ret0, _ := ret[0].([]api.ContainerSummary)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Ps indicates an expected call of Ps.
+func (mr *MockComposeMockRecorder) Ps(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ps", reflect.TypeOf((*MockCompose)(nil).Ps), ctx, projectName, options)
+}
+
+// Publish mocks base method.
+func (m *MockCompose) Publish(ctx context.Context, project *types.Project, repository string, options api.PublishOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Publish", ctx, project, repository, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Publish indicates an expected call of Publish.
+func (mr *MockComposeMockRecorder) Publish(ctx, project, repository, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockCompose)(nil).Publish), ctx, project, repository, options)
+}
+
+// Pull mocks base method.
+func (m *MockCompose) Pull(ctx context.Context, project *types.Project, options api.PullOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Pull", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Pull indicates an expected call of Pull.
+func (mr *MockComposeMockRecorder) Pull(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pull", reflect.TypeOf((*MockCompose)(nil).Pull), ctx, project, options)
+}
+
+// Push mocks base method.
+func (m *MockCompose) Push(ctx context.Context, project *types.Project, options api.PushOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Push", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Push indicates an expected call of Push.
+func (mr *MockComposeMockRecorder) Push(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockCompose)(nil).Push), ctx, project, options)
+}
+
+// Remove mocks base method.
+func (m *MockCompose) Remove(ctx context.Context, projectName string, options api.RemoveOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Remove", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Remove indicates an expected call of Remove.
+func (mr *MockComposeMockRecorder) Remove(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockCompose)(nil).Remove), ctx, projectName, options)
+}
+
+// Restart mocks base method.
+func (m *MockCompose) Restart(ctx context.Context, projectName string, options api.RestartOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Restart", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Restart indicates an expected call of Restart.
+func (mr *MockComposeMockRecorder) Restart(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restart", reflect.TypeOf((*MockCompose)(nil).Restart), ctx, projectName, options)
+}
+
+// RunOneOffContainer mocks base method.
+func (m *MockCompose) RunOneOffContainer(ctx context.Context, project *types.Project, opts api.RunOptions) (int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RunOneOffContainer", ctx, project, opts)
+ ret0, _ := ret[0].(int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RunOneOffContainer indicates an expected call of RunOneOffContainer.
+func (mr *MockComposeMockRecorder) RunOneOffContainer(ctx, project, opts any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunOneOffContainer", reflect.TypeOf((*MockCompose)(nil).RunOneOffContainer), ctx, project, opts)
+}
+
+// Scale mocks base method.
+func (m *MockCompose) Scale(ctx context.Context, project *types.Project, options api.ScaleOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Scale", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Scale indicates an expected call of Scale.
+func (mr *MockComposeMockRecorder) Scale(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scale", reflect.TypeOf((*MockCompose)(nil).Scale), ctx, project, options)
+}
+
+// Start mocks base method.
+func (m *MockCompose) Start(ctx context.Context, projectName string, options api.StartOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Start", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Start indicates an expected call of Start.
+func (mr *MockComposeMockRecorder) Start(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockCompose)(nil).Start), ctx, projectName, options)
+}
+
+// Stop mocks base method.
+func (m *MockCompose) Stop(ctx context.Context, projectName string, options api.StopOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stop", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Stop indicates an expected call of Stop.
+func (mr *MockComposeMockRecorder) Stop(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockCompose)(nil).Stop), ctx, projectName, options)
+}
+
+// Top mocks base method.
+func (m *MockCompose) Top(ctx context.Context, projectName string, services []string) ([]api.ContainerProcSummary, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Top", ctx, projectName, services)
+ ret0, _ := ret[0].([]api.ContainerProcSummary)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Top indicates an expected call of Top.
+func (mr *MockComposeMockRecorder) Top(ctx, projectName, services any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Top", reflect.TypeOf((*MockCompose)(nil).Top), ctx, projectName, services)
+}
+
+// UnPause mocks base method.
+func (m *MockCompose) UnPause(ctx context.Context, projectName string, options api.PauseOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "UnPause", ctx, projectName, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// UnPause indicates an expected call of UnPause.
+func (mr *MockComposeMockRecorder) UnPause(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnPause", reflect.TypeOf((*MockCompose)(nil).UnPause), ctx, projectName, options)
+}
+
+// Up mocks base method.
+func (m *MockCompose) Up(ctx context.Context, project *types.Project, options api.UpOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Up", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Up indicates an expected call of Up.
+func (mr *MockComposeMockRecorder) Up(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockCompose)(nil).Up), ctx, project, options)
+}
+
+// Viz mocks base method.
+func (m *MockCompose) Viz(ctx context.Context, project *types.Project, options api.VizOptions) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Viz", ctx, project, options)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Viz indicates an expected call of Viz.
+func (mr *MockComposeMockRecorder) Viz(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Viz", reflect.TypeOf((*MockCompose)(nil).Viz), ctx, project, options)
+}
+
+// Volumes mocks base method.
+func (m *MockCompose) Volumes(ctx context.Context, project string, options api.VolumesOptions) ([]api.VolumesSummary, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Volumes", ctx, project, options)
+ ret0, _ := ret[0].([]api.VolumesSummary)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Volumes indicates an expected call of Volumes.
+func (mr *MockComposeMockRecorder) Volumes(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Volumes", reflect.TypeOf((*MockCompose)(nil).Volumes), ctx, project, options)
+}
+
+// Wait mocks base method.
+func (m *MockCompose) Wait(ctx context.Context, projectName string, options api.WaitOptions) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Wait", ctx, projectName, options)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Wait indicates an expected call of Wait.
+func (mr *MockComposeMockRecorder) Wait(ctx, projectName, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wait", reflect.TypeOf((*MockCompose)(nil).Wait), ctx, projectName, options)
+}
+
+// Watch mocks base method.
+func (m *MockCompose) Watch(ctx context.Context, project *types.Project, options api.WatchOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Watch", ctx, project, options)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Watch indicates an expected call of Watch.
+func (mr *MockComposeMockRecorder) Watch(ctx, project, options any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockCompose)(nil).Watch), ctx, project, options)
+}
+
+// MockLogConsumer is a mock of LogConsumer interface.
+type MockLogConsumer struct {
+ ctrl *gomock.Controller
+ recorder *MockLogConsumerMockRecorder
+}
+
+// MockLogConsumerMockRecorder is the mock recorder for MockLogConsumer.
+type MockLogConsumerMockRecorder struct {
+ mock *MockLogConsumer
+}
+
+// NewMockLogConsumer creates a new mock instance.
+func NewMockLogConsumer(ctrl *gomock.Controller) *MockLogConsumer {
+ mock := &MockLogConsumer{ctrl: ctrl}
+ mock.recorder = &MockLogConsumerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockLogConsumer) EXPECT() *MockLogConsumerMockRecorder {
+ return m.recorder
+}
+
+// Err mocks base method.
+func (m *MockLogConsumer) Err(containerName, message string) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Err", containerName, message)
+}
+
+// Err indicates an expected call of Err.
+func (mr *MockLogConsumerMockRecorder) Err(containerName, message any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockLogConsumer)(nil).Err), containerName, message)
+}
+
+// Log mocks base method.
+func (m *MockLogConsumer) Log(containerName, message string) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Log", containerName, message)
+}
+
+// Log indicates an expected call of Log.
+func (mr *MockLogConsumerMockRecorder) Log(containerName, message any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockLogConsumer)(nil).Log), containerName, message)
+}
+
+// Status mocks base method.
+func (m *MockLogConsumer) Status(container, msg string) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Status", container, msg)
+}
+
+// Status indicates an expected call of Status.
+func (mr *MockLogConsumerMockRecorder) Status(container, msg any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockLogConsumer)(nil).Status), container, msg)
+}
diff --git a/pkg/progress/event.go b/pkg/progress/event.go
deleted file mode 100644
index 5a013a8b169..00000000000
--- a/pkg/progress/event.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package progress
-
-import "time"
-
-// EventStatus indicates the status of an action
-type EventStatus int
-
-const (
- // Working means that the current task is working
- Working EventStatus = iota
- // Done means that the current task is done
- Done
- // Error means that the current task has errored
- Error
-)
-
-// Event represents a progress event.
-type Event struct {
- ID string
- ParentID string
- Text string
- Status EventStatus
- StatusText string
-
- startTime time.Time
- endTime time.Time
- spinner *spinner
-}
-
-// ErrorMessageEvent creates a new Error Event with message
-func ErrorMessageEvent(ID string, msg string) Event {
- return NewEvent(ID, Error, msg)
-}
-
-// ErrorEvent creates a new Error Event
-func ErrorEvent(ID string) Event {
- return NewEvent(ID, Error, "Error")
-}
-
-// CreatingEvent creates a new Create in progress Event
-func CreatingEvent(ID string) Event {
- return NewEvent(ID, Working, "Creating")
-}
-
-// StartingEvent creates a new Starting in progress Event
-func StartingEvent(ID string) Event {
- return NewEvent(ID, Working, "Starting")
-}
-
-// StartedEvent creates a new Started in progress Event
-func StartedEvent(ID string) Event {
- return NewEvent(ID, Done, "Started")
-}
-
-// Waiting creates a new waiting event
-func Waiting(ID string) Event {
- return NewEvent(ID, Working, "Waiting")
-}
-
-// Healthy creates a new healthy event
-func Healthy(ID string) Event {
- return NewEvent(ID, Done, "Healthy")
-}
-
-// Exited creates a new exited event
-func Exited(ID string) Event {
- return NewEvent(ID, Done, "Exited")
-}
-
-// RestartingEvent creates a new Restarting in progress Event
-func RestartingEvent(ID string) Event {
- return NewEvent(ID, Working, "Restarting")
-}
-
-// RestartedEvent creates a new Restarted in progress Event
-func RestartedEvent(ID string) Event {
- return NewEvent(ID, Done, "Restarted")
-}
-
-// RunningEvent creates a new Running in progress Event
-func RunningEvent(ID string) Event {
- return NewEvent(ID, Done, "Running")
-}
-
-// CreatedEvent creates a new Created (done) Event
-func CreatedEvent(ID string) Event {
- return NewEvent(ID, Done, "Created")
-}
-
-// StoppingEvent creates a new Stopping in progress Event
-func StoppingEvent(ID string) Event {
- return NewEvent(ID, Working, "Stopping")
-}
-
-// StoppedEvent creates a new Stopping in progress Event
-func StoppedEvent(ID string) Event {
- return NewEvent(ID, Done, "Stopped")
-}
-
-// KillingEvent creates a new Killing in progress Event
-func KillingEvent(ID string) Event {
- return NewEvent(ID, Working, "Killing")
-}
-
-// KilledEvent creates a new Killed in progress Event
-func KilledEvent(ID string) Event {
- return NewEvent(ID, Done, "Killed")
-}
-
-// RemovingEvent creates a new Removing in progress Event
-func RemovingEvent(ID string) Event {
- return NewEvent(ID, Working, "Removing")
-}
-
-// RemovedEvent creates a new removed (done) Event
-func RemovedEvent(ID string) Event {
- return NewEvent(ID, Done, "Removed")
-}
-
-// NewEvent new event
-func NewEvent(ID string, status EventStatus, statusText string) Event {
- return Event{
- ID: ID,
- Status: status,
- StatusText: statusText,
- }
-}
-
-func (e *Event) stop() {
- e.endTime = time.Now()
- e.spinner.Stop()
-}
diff --git a/pkg/progress/tty.go b/pkg/progress/tty.go
deleted file mode 100644
index 25cb7c78902..00000000000
--- a/pkg/progress/tty.go
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package progress
-
-import (
- "context"
- "fmt"
- "io"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/docker/compose/v2/pkg/utils"
-
- "github.com/buger/goterm"
- "github.com/morikuni/aec"
-)
-
-type ttyWriter struct {
- out io.Writer
- events map[string]Event
- eventIDs []string
- repeated bool
- numLines int
- done chan bool
- mtx *sync.Mutex
- tailEvents []string
-}
-
-func (w *ttyWriter) Start(ctx context.Context) error {
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
-
- for {
- select {
- case <-ctx.Done():
- w.print()
- w.printTailEvents()
- return ctx.Err()
- case <-w.done:
- w.print()
- w.printTailEvents()
- return nil
- case <-ticker.C:
- w.print()
- }
- }
-}
-
-func (w *ttyWriter) Stop() {
- w.done <- true
-}
-
-func (w *ttyWriter) Event(e Event) {
- w.mtx.Lock()
- defer w.mtx.Unlock()
- if !utils.StringContains(w.eventIDs, e.ID) {
- w.eventIDs = append(w.eventIDs, e.ID)
- }
- if _, ok := w.events[e.ID]; ok {
- last := w.events[e.ID]
- switch e.Status {
- case Done, Error:
- if last.Status != e.Status {
- last.stop()
- }
- }
- last.Status = e.Status
- last.Text = e.Text
- last.StatusText = e.StatusText
- last.ParentID = e.ParentID
- w.events[e.ID] = last
- } else {
- e.startTime = time.Now()
- e.spinner = newSpinner()
- if e.Status == Done || e.Status == Error {
- e.stop()
- }
- w.events[e.ID] = e
- }
-}
-
-func (w *ttyWriter) Events(events []Event) {
- for _, e := range events {
- w.Event(e)
- }
-}
-
-func (w *ttyWriter) TailMsgf(msg string, args ...interface{}) {
- w.mtx.Lock()
- defer w.mtx.Unlock()
- w.tailEvents = append(w.tailEvents, fmt.Sprintf(msg, args...))
-}
-
-func (w *ttyWriter) printTailEvents() {
- w.mtx.Lock()
- defer w.mtx.Unlock()
- for _, msg := range w.tailEvents {
- fmt.Fprintln(w.out, msg)
- }
-}
-
-func (w *ttyWriter) print() {
- w.mtx.Lock()
- defer w.mtx.Unlock()
- if len(w.eventIDs) == 0 {
- return
- }
- terminalWidth := goterm.Width()
- b := aec.EmptyBuilder
- for i := 0; i <= w.numLines; i++ {
- b = b.Up(1)
- }
- if !w.repeated {
- b = b.Down(1)
- }
- w.repeated = true
- fmt.Fprint(w.out, b.Column(0).ANSI)
-
- // Hide the cursor while we are printing
- fmt.Fprint(w.out, aec.Hide)
- defer fmt.Fprint(w.out, aec.Show)
-
- firstLine := fmt.Sprintf("[+] Running %d/%d", numDone(w.events), w.numLines)
- if w.numLines != 0 && numDone(w.events) == w.numLines {
- firstLine = aec.Apply(firstLine, aec.BlueF)
- }
- fmt.Fprintln(w.out, firstLine)
-
- var statusPadding int
- for _, v := range w.eventIDs {
- event := w.events[v]
- l := len(fmt.Sprintf("%s %s", event.ID, event.Text))
- if statusPadding < l {
- statusPadding = l
- }
- if event.ParentID != "" {
- statusPadding -= 2
- }
- }
-
- numLines := 0
- for _, v := range w.eventIDs {
- event := w.events[v]
- if event.ParentID != "" {
- continue
- }
- line := lineText(event, "", terminalWidth, statusPadding, runtime.GOOS != "windows")
- // nolint: errcheck
- fmt.Fprint(w.out, line)
- numLines++
- for _, v := range w.eventIDs {
- ev := w.events[v]
- if ev.ParentID == event.ID {
- line := lineText(ev, " ", terminalWidth, statusPadding, runtime.GOOS != "windows")
- // nolint: errcheck
- fmt.Fprint(w.out, line)
- numLines++
- }
- }
- }
-
- w.numLines = numLines
-}
-
-func lineText(event Event, pad string, terminalWidth, statusPadding int, color bool) string {
- endTime := time.Now()
- if event.Status != Working {
- endTime = event.startTime
- if (event.endTime != time.Time{}) {
- endTime = event.endTime
- }
- }
-
- elapsed := endTime.Sub(event.startTime).Seconds()
-
- textLen := len(fmt.Sprintf("%s %s", event.ID, event.Text))
- padding := statusPadding - textLen
- if padding < 0 {
- padding = 0
- }
- // calculate the max length for the status text, on errors it
- // is 2-3 lines long and breaks the line formatting
- maxStatusLen := terminalWidth - textLen - statusPadding - 15
- status := event.StatusText
- // in some cases (debugging under VS Code), terminalWidth is set to zero by goterm.Width() ; ensuring we don't tweak strings with negative char index
- if maxStatusLen > 0 && len(status) > maxStatusLen {
- status = status[:maxStatusLen] + "..."
- }
- text := fmt.Sprintf("%s %s %s %s%s %s",
- pad,
- event.spinner.String(),
- event.ID,
- event.Text,
- strings.Repeat(" ", padding),
- status,
- )
- timer := fmt.Sprintf("%.1fs\n", elapsed)
- o := align(text, timer, terminalWidth)
-
- if color {
- color := aec.WhiteF
- if event.Status == Done {
- color = aec.BlueF
- }
- if event.Status == Error {
- color = aec.RedF
- }
- return aec.Apply(o, color)
- }
-
- return o
-}
-
-func numDone(events map[string]Event) int {
- i := 0
- for _, e := range events {
- if e.Status == Done {
- i++
- }
- }
- return i
-}
-
-func align(l, r string, w int) string {
- return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r)
-}
diff --git a/pkg/progress/tty_test.go b/pkg/progress/tty_test.go
deleted file mode 100644
index f9b4071d1ad..00000000000
--- a/pkg/progress/tty_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package progress
-
-import (
- "fmt"
- "sync"
- "testing"
- "time"
-
- "gotest.tools/v3/assert"
-)
-
-func TestLineText(t *testing.T) {
- now := time.Now()
- ev := Event{
- ID: "id",
- Text: "Text",
- Status: Working,
- StatusText: "Status",
- endTime: now,
- startTime: now,
- spinner: &spinner{
- chars: []string{"."},
- },
- }
-
- lineWidth := len(fmt.Sprintf("%s %s", ev.ID, ev.Text))
-
- out := lineText(ev, "", 50, lineWidth, true)
- assert.Equal(t, out, "\x1b[37m . id Text Status 0.0s\n\x1b[0m")
-
- out = lineText(ev, "", 50, lineWidth, false)
- assert.Equal(t, out, " . id Text Status 0.0s\n")
-
- ev.Status = Done
- out = lineText(ev, "", 50, lineWidth, true)
- assert.Equal(t, out, "\x1b[34m . id Text Status 0.0s\n\x1b[0m")
-
- ev.Status = Error
- out = lineText(ev, "", 50, lineWidth, true)
- assert.Equal(t, out, "\x1b[31m . id Text Status 0.0s\n\x1b[0m")
-}
-
-func TestLineTextSingleEvent(t *testing.T) {
- now := time.Now()
- ev := Event{
- ID: "id",
- Text: "Text",
- Status: Done,
- StatusText: "Status",
- startTime: now,
- spinner: &spinner{
- chars: []string{"."},
- },
- }
-
- lineWidth := len(fmt.Sprintf("%s %s", ev.ID, ev.Text))
-
- out := lineText(ev, "", 50, lineWidth, true)
- assert.Equal(t, out, "\x1b[34m . id Text Status 0.0s\n\x1b[0m")
-}
-
-func TestErrorEvent(t *testing.T) {
- w := &ttyWriter{
- events: map[string]Event{},
- mtx: &sync.Mutex{},
- }
- e := Event{
- ID: "id",
- Text: "Text",
- Status: Working,
- StatusText: "Working",
- startTime: time.Now(),
- spinner: &spinner{
- chars: []string{"."},
- },
- }
- // Fire "Working" event and check end time isn't touched
- w.Event(e)
- event, ok := w.events[e.ID]
- assert.Assert(t, ok)
- assert.Assert(t, event.endTime.Equal(time.Time{}))
-
- // Fire "Error" event and check end time is set
- e.Status = Error
- w.Event(e)
- event, ok = w.events[e.ID]
- assert.Assert(t, ok)
- assert.Assert(t, event.endTime.After(time.Now().Add(-10*time.Second)))
-}
diff --git a/pkg/progress/writer.go b/pkg/progress/writer.go
deleted file mode 100644
index 2914364e430..00000000000
--- a/pkg/progress/writer.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package progress
-
-import (
- "context"
- "os"
- "sync"
-
- "github.com/containerd/console"
- "github.com/moby/term"
- "golang.org/x/sync/errgroup"
-)
-
-// Writer can write multiple progress events
-type Writer interface {
- Start(context.Context) error
- Stop()
- Event(Event)
- Events([]Event)
- TailMsgf(string, ...interface{})
-}
-
-type writerKey struct{}
-
-// WithContextWriter adds the writer to the context
-func WithContextWriter(ctx context.Context, writer Writer) context.Context {
- return context.WithValue(ctx, writerKey{}, writer)
-}
-
-// ContextWriter returns the writer from the context
-func ContextWriter(ctx context.Context) Writer {
- s, ok := ctx.Value(writerKey{}).(Writer)
- if !ok {
- return &noopWriter{}
- }
- return s
-}
-
-type progressFunc func(context.Context) error
-
-type progressFuncWithStatus func(context.Context) (string, error)
-
-// Run will run a writer and the progress function in parallel
-func Run(ctx context.Context, pf progressFunc) error {
- _, err := RunWithStatus(ctx, func(ctx context.Context) (string, error) {
- return "", pf(ctx)
- })
- return err
-}
-
-// RunWithStatus will run a writer and the progress function in parallel and return a status
-func RunWithStatus(ctx context.Context, pf progressFuncWithStatus) (string, error) {
- eg, _ := errgroup.WithContext(ctx)
- w, err := NewWriter(os.Stderr)
- var result string
- if err != nil {
- return "", err
- }
- eg.Go(func() error {
- return w.Start(context.Background())
- })
-
- ctx = WithContextWriter(ctx, w)
-
- eg.Go(func() error {
- defer w.Stop()
- s, err := pf(ctx)
- if err == nil {
- result = s
- }
- return err
- })
-
- err = eg.Wait()
- return result, err
-}
-
-const (
- // ModeAuto detect console capabilities
- ModeAuto = "auto"
- // ModeTTY use terminal capability for advanced rendering
- ModeTTY = "tty"
- // ModePlain dump raw events to output
- ModePlain = "plain"
-)
-
-// Mode define how progress should be rendered, either as ModePlain or ModeTTY
-var Mode = ModeAuto
-
-// NewWriter returns a new multi-progress writer
-func NewWriter(out console.File) (Writer, error) {
- _, isTerminal := term.GetFdInfo(out)
- if Mode == ModeAuto && isTerminal {
- return newTTYWriter(out)
- }
- if Mode == ModeTTY {
- return newTTYWriter(out)
- }
- return &plainWriter{
- out: out,
- done: make(chan bool),
- }, nil
-}
-
-func newTTYWriter(out console.File) (Writer, error) {
- con, err := console.ConsoleFromFile(out)
- if err != nil {
- return nil, err
- }
-
- return &ttyWriter{
- out: con,
- eventIDs: []string{},
- events: map[string]Event{},
- repeated: false,
- done: make(chan bool),
- mtx: &sync.Mutex{},
- }, nil
-}
diff --git a/pkg/prompt/prompt.go b/pkg/prompt/prompt.go
deleted file mode 100644
index 772de0acd93..00000000000
--- a/pkg/prompt/prompt.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package prompt
-
-import (
- "github.com/AlecAivazis/survey/v2"
-)
-
-//go:generate mockgen -destination=./prompt_mock.go -self_package "github.com/docker/compose/v2/pkg/prompt" -package=prompt . UI
-
-// UI - prompt user input
-type UI interface {
- Select(message string, options []string) (int, error)
- Input(message string, defaultValue string) (string, error)
- Confirm(message string, defaultValue bool) (bool, error)
- Password(message string) (string, error)
-}
-
-// User - aggregates prompt methods
-type User struct{}
-
-// Select - displays a list
-func (u User) Select(message string, options []string) (int, error) {
- qs := &survey.Select{
- Message: message,
- Options: options,
- }
- var selected int
- err := survey.AskOne(qs, &selected, nil)
- return selected, err
-}
-
-// Input text with default value
-func (u User) Input(message string, defaultValue string) (string, error) {
- qs := &survey.Input{
- Message: message,
- Default: defaultValue,
- }
- var s string
- err := survey.AskOne(qs, &s, nil)
- return s, err
-}
-
-// Confirm asks for yes or no input
-func (u User) Confirm(message string, defaultValue bool) (bool, error) {
- qs := &survey.Confirm{
- Message: message,
- Default: defaultValue,
- }
- var b bool
- err := survey.AskOne(qs, &b, nil)
- return b, err
-}
-
-// Password implements a text input with masked characters.
-func (u User) Password(message string) (string, error) {
- qs := &survey.Password{
- Message: message,
- }
- var p string
- err := survey.AskOne(qs, &p, nil)
- return p, err
-
-}
diff --git a/pkg/remote/cache.go b/pkg/remote/cache.go
new file mode 100644
index 00000000000..a0a6d03194e
--- /dev/null
+++ b/pkg/remote/cache.go
@@ -0,0 +1,36 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "os"
+ "path/filepath"
+)
+
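+// cacheDir returns the directory used to cache remote compose resources.
+// When XDG_CACHE_HOME is set, its "docker-compose" subdirectory is returned
+// as-is; otherwise the platform-specific default from osDependentCacheDir is
+// created with 0700 permissions and returned.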
+func cacheDir() (string, error) {
+ cache, ok := os.LookupEnv("XDG_CACHE_HOME")
+ if ok {
+ return filepath.Join(cache, "docker-compose"), nil
+ }
+
+ path, err := osDependentCacheDir()
+ if err != nil {
+ return "", err
+ }
+ err = os.MkdirAll(path, 0o700)
+ return path, err
+}
diff --git a/pkg/remote/cache_darwin.go b/pkg/remote/cache_darwin.go
new file mode 100644
index 00000000000..7830e8ade45
--- /dev/null
+++ b/pkg/remote/cache_darwin.go
@@ -0,0 +1,34 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
+func osDependentCacheDir() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, "Library", "Caches", "docker-compose"), nil
+}
diff --git a/pkg/remote/cache_unix.go b/pkg/remote/cache_unix.go
new file mode 100644
index 00000000000..9887dc7bb4e
--- /dev/null
+++ b/pkg/remote/cache_unix.go
@@ -0,0 +1,36 @@
+//go:build linux || openbsd || freebsd
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
+func osDependentCacheDir() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, ".cache", "docker-compose"), nil
+}
diff --git a/pkg/remote/cache_windows.go b/pkg/remote/cache_windows.go
new file mode 100644
index 00000000000..5bc7a2f1d92
--- /dev/null
+++ b/pkg/remote/cache_windows.go
@@ -0,0 +1,49 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/windows"
+)
+
+// Based on https://github.com/adrg/xdg
+// Licensed under MIT License (MIT)
+// Copyright (c) 2014 Adrian-George Bostan
+
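+// osDependentCacheDir resolves the Windows local application data folder,
+// preferring the known-folder API for FOLDERID_LocalAppData, then the
+// LOCALAPPDATA environment variable, and finally %USERPROFILE%\AppData\Local.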
+func osDependentCacheDir() (string, error) {
+ flags := []uint32{windows.KF_FLAG_DEFAULT, windows.KF_FLAG_DEFAULT_PATH}
+ for _, flag := range flags {
+ p, _ := windows.KnownFolderPath(windows.FOLDERID_LocalAppData, flag|windows.KF_FLAG_DONT_VERIFY)
+ if p != "" {
+ return filepath.Join(p, "cache", "docker-compose"), nil
+ }
+ }
+
+ appData, ok := os.LookupEnv("LOCALAPPDATA")
+ if ok {
+ return filepath.Join(appData, "cache", "docker-compose"), nil
+ }
+
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(home, "AppData", "Local", "cache", "docker-compose"), nil
+}
diff --git a/pkg/remote/git.go b/pkg/remote/git.go
new file mode 100644
index 00000000000..e49b40d83ab
--- /dev/null
+++ b/pkg/remote/git.go
@@ -0,0 +1,266 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/cli"
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/pkg/api"
+ gitutil "github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
+ "github.com/sirupsen/logrus"
+)
+
+const GIT_REMOTE_ENABLED = "COMPOSE_EXPERIMENTAL_GIT_REMOTE"
+
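+// gitRemoteLoaderEnabled reports whether the experimental git remote loader
+// is active. It defaults to true and can be disabled by setting
+// COMPOSE_EXPERIMENTAL_GIT_REMOTE to a false boolean value.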
+func gitRemoteLoaderEnabled() (bool, error) {
+ if v := os.Getenv(GIT_REMOTE_ENABLED); v != "" {
+ enabled, err := strconv.ParseBool(v)
+ if err != nil {
+ return false, fmt.Errorf("COMPOSE_EXPERIMENTAL_GIT_REMOTE environment variable expects boolean value: %w", err)
+ }
+ return enabled, err
+ }
+ return true, nil
+}
+
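+// NewGitRemoteLoader returns a loader.ResourceLoader that materializes git
+// references into shallow local checkouts under the compose cache directory.
+// When offline is true, only previously cached checkouts are reused and no
+// network access is attempted.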
+func NewGitRemoteLoader(dockerCli command.Cli, offline bool) loader.ResourceLoader {
+ return gitRemoteLoader{
+ dockerCli: dockerCli,
+ offline: offline,
+ known: map[string]string{},
+ }
+}
+
+type gitRemoteLoader struct {
+ dockerCli command.Cli
+ offline bool
+ known map[string]string
+}
+
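+// Accept reports whether path parses as a git reference and can therefore be
+// handled by this loader.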
+func (g gitRemoteLoader) Accept(path string) bool {
+ _, _, err := gitutil.ParseGitRef(path)
+ return err == nil
+}
+
+var commitSHA = regexp.MustCompile(`^[a-f0-9]{40}$`)
+
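+// Load resolves path to a local compose file. It pins the git ref to a commit
+// SHA (defaulting to HEAD), checks the commit out into the cache unless it is
+// already present or the loader is offline, validates any subdirectory against
+// path traversal, and returns the first default compose file found in the
+// resulting directory (or the path itself when it points at a file).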
+func (g gitRemoteLoader) Load(ctx context.Context, path string) (string, error) {
+ enabled, err := gitRemoteLoaderEnabled()
+ if err != nil {
+ return "", err
+ }
+ if !enabled {
+ return "", fmt.Errorf("git remote resource is disabled by %q", GIT_REMOTE_ENABLED)
+ }
+
+ ref, _, err := gitutil.ParseGitRef(path)
+ if err != nil {
+ return "", err
+ }
+
+ local, ok := g.known[path]
+ if !ok {
+ if ref.Ref == "" {
+ ref.Ref = "HEAD" // default branch
+ }
+
+ err = g.resolveGitRef(ctx, path, ref)
+ if err != nil {
+ return "", err
+ }
+
+ cache, err := cacheDir()
+ if err != nil {
+ return "", fmt.Errorf("initializing remote resource cache: %w", err)
+ }
+
+ local = filepath.Join(cache, ref.Ref)
+ if _, err := os.Stat(local); os.IsNotExist(err) {
+ if g.offline {
+ return "", nil
+ }
+ err = g.checkout(ctx, local, ref)
+ if err != nil {
+ return "", err
+ }
+ }
+ g.known[path] = local
+ }
+ if ref.SubDir != "" {
+ if err := validateGitSubDir(local, ref.SubDir); err != nil {
+ return "", err
+ }
+ local = filepath.Join(local, ref.SubDir)
+ }
+ stat, err := os.Stat(local)
+ if err != nil {
+ return "", err
+ }
+ if stat.IsDir() {
+ local, err = findFile(cli.DefaultFileNames, local)
+ }
+ return local, err
+}
+
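+// Dir returns the local checkout directory previously resolved by Load for
+// path, or an empty string if path has not been loaded yet.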
+func (g gitRemoteLoader) Dir(path string) string {
+ return g.known[path]
+}
+
+// validateGitSubDir ensures a subdirectory path is contained within the base directory
+// and doesn't escape via path traversal. Unlike validatePathInBase for OCI artifacts,
+// this allows nested directories but prevents traversal outside the base.
+func validateGitSubDir(base, subDir string) error {
+ cleanSubDir := filepath.Clean(subDir)
+
+ if filepath.IsAbs(cleanSubDir) {
+ return fmt.Errorf("git subdirectory must be relative, got: %s", subDir)
+ }
+
+ if cleanSubDir == ".." || strings.HasPrefix(cleanSubDir, "../") || strings.HasPrefix(cleanSubDir, "..\\") {
+ return fmt.Errorf("git subdirectory path traversal detected: %s", subDir)
+ }
+
+ if len(cleanSubDir) >= 2 && cleanSubDir[1] == ':' {
+ return fmt.Errorf("git subdirectory must be relative, got: %s", subDir)
+ }
+
+ targetPath := filepath.Join(base, cleanSubDir)
+ cleanBase := filepath.Clean(base)
+ cleanTarget := filepath.Clean(targetPath)
+
+ // Ensure the target starts with the base path
+ relPath, err := filepath.Rel(cleanBase, cleanTarget)
+ if err != nil {
+ return fmt.Errorf("invalid git subdirectory path: %w", err)
+ }
+
+ if relPath == ".." || strings.HasPrefix(relPath, "../") || strings.HasPrefix(relPath, "..\\") {
+ return fmt.Errorf("git subdirectory escapes base directory: %s", subDir)
+ }
+
+ return nil
+}
+
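+// resolveGitRef pins ref.Ref to a full 40-character commit SHA. Anything that
+// is not already a commit SHA is resolved via `git ls-remote --exit-code`, so
+// cache entries and checkouts are keyed by an immutable commit rather than a
+// moving branch or tag name.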
+func (g gitRemoteLoader) resolveGitRef(ctx context.Context, path string, ref *gitutil.GitRef) error {
+ if !commitSHA.MatchString(ref.Ref) {
+ cmd := exec.CommandContext(ctx, "git", "ls-remote", "--exit-code", ref.Remote, ref.Ref)
+ cmd.Env = g.gitCommandEnv()
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ if cmd.ProcessState.ExitCode() == 2 {
+ return fmt.Errorf("repository does not contain ref %s, output: %q: %w", path, string(out), err)
+ }
+ return fmt.Errorf("failed to access repository at %s:\n %s", ref.Remote, out)
+ }
+ if len(out) < 40 {
+ return fmt.Errorf("unexpected git command output: %q", string(out))
+ }
+ sha := string(out[:40])
+ if !commitSHA.MatchString(sha) {
+ return fmt.Errorf("invalid commit sha %q", sha)
+ }
+ ref.Ref = sha
+ }
+ return nil
+}
+
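+// checkout performs a minimal clone of the pinned commit into path: it
+// initializes an empty repository, adds the remote as "origin", fetches the
+// single commit with --depth=1, and checks it out.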
+func (g gitRemoteLoader) checkout(ctx context.Context, path string, ref *gitutil.GitRef) error {
+ err := os.MkdirAll(path, 0o700)
+ if err != nil {
+ return err
+ }
+ err = exec.CommandContext(ctx, "git", "init", path).Run()
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.CommandContext(ctx, "git", "remote", "add", "origin", ref.Remote)
+ cmd.Dir = path
+ err = cmd.Run()
+ if err != nil {
+ return err
+ }
+
+ cmd = exec.CommandContext(ctx, "git", "fetch", "--depth=1", "origin", ref.Ref)
+ cmd.Env = g.gitCommandEnv()
+ cmd.Dir = path
+
+ err = g.run(cmd)
+ if err != nil {
+ return err
+ }
+
+ cmd = exec.CommandContext(ctx, "git", "checkout", ref.Ref)
+ cmd.Dir = path
+ err = cmd.Run()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (g gitRemoteLoader) run(cmd *exec.Cmd) error {
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ output, err := cmd.CombinedOutput()
+ scanner := bufio.NewScanner(bytes.NewBuffer(output))
+ for scanner.Scan() {
+ line := scanner.Text()
+ logrus.Debug(line)
+ }
+ return err
+ }
+ return cmd.Run()
+}
+
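+// gitCommandEnv builds the environment for spawned git commands, disabling
+// interactive password prompts (GIT_TERMINAL_PROMPT=0) and forcing a
+// non-interactive ssh invocation unless GIT_SSH or GIT_SSH_COMMAND is already
+// configured.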
+func (g gitRemoteLoader) gitCommandEnv() []string {
+ env := types.NewMapping(os.Environ())
+ if env["GIT_TERMINAL_PROMPT"] == "" {
+ // Disable prompting for passwords by Git until user explicitly asks for it.
+ env["GIT_TERMINAL_PROMPT"] = "0"
+ }
+ if env["GIT_SSH"] == "" && env["GIT_SSH_COMMAND"] == "" {
+ // Disable any ssh connection pooling by Git and do not attempt to prompt the user.
+ env["GIT_SSH_COMMAND"] = "ssh -o ControlMaster=no -o BatchMode=yes"
+ }
+ v := env.Values()
+ return v
+}
+
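+// findFile returns the first of the candidate file names that exists as a
+// regular file in pwd, or api.ErrNotFound when none is present.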
+func findFile(names []string, pwd string) (string, error) {
+ for _, n := range names {
+ f := filepath.Join(pwd, n)
+ if fi, err := os.Stat(f); err == nil && !fi.IsDir() {
+ return f, nil
+ }
+ }
+ return "", api.ErrNotFound
+}
+
+var _ loader.ResourceLoader = gitRemoteLoader{}
diff --git a/pkg/remote/git_test.go b/pkg/remote/git_test.go
new file mode 100644
index 00000000000..f78bcc25dbb
--- /dev/null
+++ b/pkg/remote/git_test.go
@@ -0,0 +1,175 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestValidateGitSubDir(t *testing.T) {
+ base := "/tmp/cache/compose/abc123def456"
+
+ tests := []struct {
+ name string
+ subDir string
+ wantErr bool
+ }{
+ {
+ name: "valid simple directory",
+ subDir: "examples",
+ wantErr: false,
+ },
+ {
+ name: "valid nested directory",
+ subDir: "examples/nginx",
+ wantErr: false,
+ },
+ {
+ name: "valid deeply nested directory",
+ subDir: "examples/web/frontend/config",
+ wantErr: false,
+ },
+ {
+ name: "valid current directory",
+ subDir: ".",
+ wantErr: false,
+ },
+ {
+ name: "valid directory with redundant separators",
+ subDir: "examples//nginx",
+ wantErr: false,
+ },
+ {
+ name: "valid directory with dots in name",
+ subDir: "examples/nginx.conf.d",
+ wantErr: false,
+ },
+ {
+ name: "path traversal - parent directory",
+ subDir: "..",
+ wantErr: true,
+ },
+ {
+ name: "path traversal - multiple parent directories",
+ subDir: "../../../etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "path traversal - deeply nested escape",
+ subDir: "../../../../../../../tmp/pwned",
+ wantErr: true,
+ },
+ {
+ name: "path traversal - mixed with valid path",
+ subDir: "examples/../../etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "path traversal - at the end",
+ subDir: "examples/..",
+ wantErr: false, // This resolves to "." which is the current directory, safe
+ },
+ {
+ name: "path traversal - in the middle",
+ subDir: "examples/../../../etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "path traversal - windows style",
+ subDir: "..\\..\\..\\windows\\system32",
+ wantErr: true,
+ },
+ {
+ name: "absolute unix path",
+ subDir: "/etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "absolute windows path",
+ subDir: "C:\\windows\\system32\\config\\sam",
+ wantErr: true,
+ },
+ {
+ name: "absolute path with home directory",
+ subDir: "/home/user/.ssh/id_rsa",
+ wantErr: true,
+ },
+ {
+ name: "normalized path that would escape",
+ subDir: "./../../etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "directory name with three dots",
+ subDir: ".../config",
+ wantErr: false,
+ },
+ {
+ name: "directory name with four dots",
+ subDir: "..../config",
+ wantErr: false,
+ },
+ {
+ name: "directory name with five dots",
+ subDir: "...../etc/passwd",
+ wantErr: false, // "....." is a valid directory name, not path traversal
+ },
+ {
+ name: "directory name starting with two dots and letter",
+ subDir: "..foo/bar",
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := validateGitSubDir(base, tt.subDir)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("validateGitSubDir(%q, %q) error = %v, wantErr %v",
+ base, tt.subDir, err, tt.wantErr)
+ }
+ })
+ }
+}
+
+// TestValidateGitSubDirSecurityScenarios tests specific security scenarios
+func TestValidateGitSubDirSecurityScenarios(t *testing.T) {
+ base := "/var/cache/docker-compose/git/1234567890abcdef"
+
+ // Test the exact vulnerability scenario from the issue
+ t.Run("CVE scenario - /tmp traversal", func(t *testing.T) {
+ maliciousPath := "../../../../../../../tmp/pwned"
+ err := validateGitSubDir(base, maliciousPath)
+ assert.ErrorContains(t, err, "path traversal")
+ })
+
+ // Test variations of the attack
+ t.Run("CVE scenario - /etc traversal", func(t *testing.T) {
+ maliciousPath := "../../../../../../../../etc/passwd"
+ err := validateGitSubDir(base, maliciousPath)
+ assert.ErrorContains(t, err, "path traversal")
+ })
+
+ // Test that legitimate nested paths still work
+ t.Run("legitimate nested path", func(t *testing.T) {
+ validPath := "examples/docker-compose/nginx/config"
+ err := validateGitSubDir(base, validPath)
+ assert.NilError(t, err)
+ })
+}
diff --git a/pkg/remote/oci.go b/pkg/remote/oci.go
new file mode 100644
index 00000000000..d74d70a091c
--- /dev/null
+++ b/pkg/remote/oci.go
@@ -0,0 +1,261 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/loader"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ "github.com/distribution/reference"
+ "github.com/docker/cli/cli/command"
+ "github.com/docker/compose/v5/internal/oci"
+ "github.com/docker/compose/v5/pkg/api"
+ spec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+const (
+ OCI_REMOTE_ENABLED = "COMPOSE_EXPERIMENTAL_OCI_REMOTE"
+ OciPrefix = "oci://"
+)
+
+// validatePathInBase ensures a file path is contained within the base directory,
+// as OCI artifact resources must all live within the same folder.
+func validatePathInBase(base, unsafePath string) error {
+ // Reject paths with path separators regardless of OS
+ if strings.ContainsAny(unsafePath, "\\/") {
+ return fmt.Errorf("invalid OCI artifact")
+ }
+
+ // Join the base with the untrusted path
+ targetPath := filepath.Join(base, unsafePath)
+
+ // Get the directory of the target path
+ targetDir := filepath.Dir(targetPath)
+
+ // Clean both paths to resolve any .. or . components
+ cleanBase := filepath.Clean(base)
+ cleanTargetDir := filepath.Clean(targetDir)
+
+ // Check that the target directory is the same as the base directory
+ if cleanTargetDir != cleanBase {
+ return fmt.Errorf("invalid OCI artifact")
+ }
+
+ return nil
+}
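+
+// Illustrative examples (not part of this change): "compose.yaml" and
+// ".env.prod" join to files directly under base and are accepted, while
+// "../other" and "config/base.yaml" contain a separator (and would resolve
+// outside or below base), so they are rejected.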
+
+func ociRemoteLoaderEnabled() (bool, error) {
+ if v := os.Getenv(OCI_REMOTE_ENABLED); v != "" {
+ enabled, err := strconv.ParseBool(v)
+ if err != nil {
+ return false, fmt.Errorf("COMPOSE_EXPERIMENTAL_OCI_REMOTE environment variable expects boolean value: %w", err)
+ }
+ return enabled, err
+ }
+ return true, nil
+}
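+
+// Example (illustrative): `COMPOSE_EXPERIMENTAL_OCI_REMOTE=0 docker compose up`
+// disables OCI remote resources; any value accepted by strconv.ParseBool
+// ("1", "t", "true", "0", "false", ...) is honoured, and unset means enabled.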
+
+func NewOCIRemoteLoader(dockerCli command.Cli, offline bool, options api.OCIOptions) loader.ResourceLoader {
+ return ociRemoteLoader{
+ dockerCli: dockerCli,
+ offline: offline,
+ known: map[string]string{},
+ insecureRegistries: options.InsecureRegistries,
+ }
+}
+
+type ociRemoteLoader struct {
+ dockerCli command.Cli
+ offline bool
+ known map[string]string
+ insecureRegistries []string
+}
+
+func (g ociRemoteLoader) Accept(path string) bool {
+ return strings.HasPrefix(path, OciPrefix)
+}
+
+//nolint:gocyclo
+func (g ociRemoteLoader) Load(ctx context.Context, path string) (string, error) {
+ enabled, err := ociRemoteLoaderEnabled()
+ if err != nil {
+ return "", err
+ }
+ if !enabled {
+ return "", fmt.Errorf("OCI remote resource is disabled by %q", OCI_REMOTE_ENABLED)
+ }
+
+ if g.offline {
+ return "", nil
+ }
+
+ local, ok := g.known[path]
+ if !ok {
+ ref, err := reference.ParseDockerRef(path[len(OciPrefix):])
+ if err != nil {
+ return "", err
+ }
+
+ resolver := oci.NewResolver(g.dockerCli.ConfigFile(), g.insecureRegistries...)
+
+ descriptor, content, err := oci.Get(ctx, resolver, ref)
+ if err != nil {
+ return "", fmt.Errorf("failed to pull OCI resource %q: %w", ref, err)
+ }
+
+ cache, err := cacheDir()
+ if err != nil {
+ return "", fmt.Errorf("initializing remote resource cache: %w", err)
+ }
+
+ local = filepath.Join(cache, descriptor.Digest.Hex())
+ if _, err = os.Stat(local); os.IsNotExist(err) {
+
+ // a Compose application bundle is published as an image index
+ if images.IsIndexType(descriptor.MediaType) {
+ var index spec.Index
+ err = json.Unmarshal(content, &index)
+ if err != nil {
+ return "", err
+ }
+ found := false
+ for _, manifest := range index.Manifests {
+ if manifest.ArtifactType != oci.ComposeProjectArtifactType {
+ continue
+ }
+ found = true
+ digested, err := reference.WithDigest(ref, manifest.Digest)
+ if err != nil {
+ return "", err
+ }
+ descriptor, content, err = oci.Get(ctx, resolver, digested)
+ if err != nil {
+ return "", fmt.Errorf("failed to pull OCI resource %q: %w", ref, err)
+ }
+ }
+ if !found {
+ return "", fmt.Errorf("OCI index %s doesn't refer to compose artifacts", ref)
+ }
+ }
+
+ var manifest spec.Manifest
+ err = json.Unmarshal(content, &manifest)
+ if err != nil {
+ return "", err
+ }
+
+ err = g.pullComposeFiles(ctx, local, manifest, ref, resolver)
+ if err != nil {
+ // clean up the directory so we don't leave empty or partial files behind
+ _ = os.RemoveAll(local)
+ return "", err
+ }
+ }
+ g.known[path] = local
+ }
+ return filepath.Join(local, "compose.yaml"), nil
+}
+
+func (g ociRemoteLoader) Dir(path string) string {
+ return g.known[path]
+}
+
+func (g ociRemoteLoader) pullComposeFiles(ctx context.Context, local string, manifest spec.Manifest, ref reference.Named, resolver remotes.Resolver) error {
+ err := os.MkdirAll(local, 0o700)
+ if err != nil {
+ return err
+ }
+ if (manifest.ArtifactType != "" && manifest.ArtifactType != oci.ComposeProjectArtifactType) ||
+ (manifest.ArtifactType == "" && manifest.Config.MediaType != oci.ComposeEmptyConfigMediaType) {
+ return fmt.Errorf("%s is not a compose project OCI artifact, but %s", ref.String(), manifest.ArtifactType)
+ }
+
+ for i, layer := range manifest.Layers {
+ digested, err := reference.WithDigest(ref, layer.Digest)
+ if err != nil {
+ return err
+ }
+
+ _, content, err := oci.Get(ctx, resolver, digested)
+ if err != nil {
+ return err
+ }
+
+ switch layer.MediaType {
+ case oci.ComposeYAMLMediaType:
+ if err := writeComposeFile(layer, i, local, content); err != nil {
+ return err
+ }
+ case oci.ComposeEnvFileMediaType:
+ if err := writeEnvFile(layer, local, content); err != nil {
+ return err
+ }
+ case oci.ComposeEmptyConfigMediaType:
+ }
+ }
+ return nil
+}
+
+func writeComposeFile(layer spec.Descriptor, i int, local string, content []byte) error {
+ file := "compose.yaml"
+ if _, ok := layer.Annotations["com.docker.compose.extends"]; ok {
+ file = layer.Annotations["com.docker.compose.file"]
+ if err := validatePathInBase(local, file); err != nil {
+ return err
+ }
+ }
+ f, err := os.OpenFile(filepath.Join(local, file), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o600)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = f.Close() }()
+ if _, ok := layer.Annotations["com.docker.compose.file"]; i > 0 && ok {
+ _, err := f.Write([]byte("\n---\n"))
+ if err != nil {
+ return err
+ }
+ }
+ _, err = f.Write(content)
+ return err
+}
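+
+// Note: the "\n---\n" written above is the YAML document separator, so when
+// several compose YAML layers end up appended to the same file they remain
+// distinct YAML documents rather than running together.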
+
+func writeEnvFile(layer spec.Descriptor, local string, content []byte) error {
+ envfilePath, ok := layer.Annotations["com.docker.compose.envfile"]
+ if !ok {
+ return fmt.Errorf("missing annotation com.docker.compose.envfile in layer %q", layer.Digest)
+ }
+ if err := validatePathInBase(local, envfilePath); err != nil {
+ return err
+ }
+ otherFile, err := os.Create(filepath.Join(local, envfilePath))
+ if err != nil {
+ return err
+ }
+ defer func() { _ = otherFile.Close() }()
+ _, err = otherFile.Write(content)
+ return err
+}
+
+var _ loader.ResourceLoader = ociRemoteLoader{}
diff --git a/pkg/remote/oci_test.go b/pkg/remote/oci_test.go
new file mode 100644
index 00000000000..28a4dbd4847
--- /dev/null
+++ b/pkg/remote/oci_test.go
@@ -0,0 +1,139 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package remote
+
+import (
+ "path/filepath"
+ "testing"
+
+ spec "github.com/opencontainers/image-spec/specs-go/v1"
+ "gotest.tools/v3/assert"
+)
+
+func TestValidatePathInBase(t *testing.T) {
+ base := "/tmp/cache/compose"
+
+ tests := []struct {
+ name string
+ unsafePath string
+ wantErr bool
+ }{
+ {
+ name: "valid simple filename",
+ unsafePath: "compose.yaml",
+ wantErr: false,
+ },
+ {
+ name: "valid hashed filename",
+ unsafePath: "f8f9ede3d201ec37d5a5e3a77bbadab79af26035e53135e19571f50d541d390c.yaml",
+ wantErr: false,
+ },
+ {
+ name: "valid env file",
+ unsafePath: ".env",
+ wantErr: false,
+ },
+ {
+ name: "valid env file with suffix",
+ unsafePath: ".env.prod",
+ wantErr: false,
+ },
+ {
+ name: "unix path traversal",
+ unsafePath: "../../../etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "windows path traversal",
+ unsafePath: "..\\..\\..\\windows\\system32\\config\\sam",
+ wantErr: true,
+ },
+ {
+ name: "subdirectory unix",
+ unsafePath: "config/base.yaml",
+ wantErr: true,
+ },
+ {
+ name: "subdirectory windows",
+ unsafePath: "config\\base.yaml",
+ wantErr: true,
+ },
+ {
+ name: "absolute unix path",
+ unsafePath: "/etc/passwd",
+ wantErr: true,
+ },
+ {
+ name: "absolute windows path",
+ unsafePath: "C:\\windows\\system32\\config\\sam",
+ wantErr: true,
+ },
+ {
+ name: "parent reference only",
+ unsafePath: "..",
+ wantErr: true,
+ },
+ {
+ name: "mixed separators",
+ unsafePath: "config/sub\\file.yaml",
+ wantErr: true,
+ },
+ {
+ name: "filename with spaces",
+ unsafePath: "my file.yaml",
+ wantErr: false,
+ },
+ {
+ name: "filename with special chars",
+ unsafePath: "file-name_v1.2.3.yaml",
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := validatePathInBase(base, tt.unsafePath)
+ if (err != nil) != tt.wantErr {
+ targetPath := filepath.Join(base, tt.unsafePath)
+ targetDir := filepath.Dir(targetPath)
+ t.Errorf("validatePathInBase(%q, %q) error = %v, wantErr %v\ntargetDir=%q base=%q",
+ base, tt.unsafePath, err, tt.wantErr, targetDir, base)
+ }
+ })
+ }
+}
+
+func TestWriteComposeFileWithExtendsPathTraversal(t *testing.T) {
+ tmpDir := t.TempDir()
+
+ // Create a layer with com.docker.compose.extends=true and a path traversal attempt
+ layer := spec.Descriptor{
+ MediaType: "application/vnd.docker.compose.file.v1+yaml",
+ Digest: "sha256:test123",
+ Size: 100,
+ Annotations: map[string]string{
+ "com.docker.compose.extends": "true",
+ "com.docker.compose.file": "../other",
+ },
+ }
+
+ content := []byte("services:\n test:\n image: nginx\n")
+
+ // writeComposeFile should return an error due to path traversal
+ err := writeComposeFile(layer, 0, tmpDir, content)
+ assert.Error(t, err, "invalid OCI artifact")
+}
diff --git a/pkg/utils/durationutils.go b/pkg/utils/durationutils.go
new file mode 100644
index 00000000000..98ab3c91615
--- /dev/null
+++ b/pkg/utils/durationutils.go
@@ -0,0 +1,27 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import "time"
+
+func DurationSecondToInt(d *time.Duration) *int {
+ if d == nil {
+ return nil
+ }
+ timeout := int(d.Seconds())
+ return &timeout
+}
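+
+// Example (illustrative): a *time.Duration holding 10*time.Second converts to
+// a *int holding 10, and a nil input stays nil so optional values pass
+// through unchanged.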
diff --git a/pkg/utils/safebuffer.go b/pkg/utils/safebuffer.go
new file mode 100644
index 00000000000..0545c463ce6
--- /dev/null
+++ b/pkg/utils/safebuffer.go
@@ -0,0 +1,78 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "bytes"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+// SafeBuffer is a thread-safe version of bytes.Buffer
+type SafeBuffer struct {
+ m sync.RWMutex
+ b bytes.Buffer
+}
+
+// Read is a thread-safe version of bytes.Buffer::Read
+func (b *SafeBuffer) Read(p []byte) (n int, err error) {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.b.Read(p)
+}
+
+// Write is a thread-safe version of bytes.Buffer::Write
+func (b *SafeBuffer) Write(p []byte) (n int, err error) {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.b.Write(p)
+}
+
+// String is a thread-safe version of bytes.Buffer::String
+func (b *SafeBuffer) String() string {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.b.String()
+}
+
+// Bytes is a thread-safe version of bytes.Buffer::Bytes
+func (b *SafeBuffer) Bytes() []byte {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.b.Bytes()
+}
+
+// RequireEventuallyContains is a thread-safe eventual checker for the buffer content
+func (b *SafeBuffer) RequireEventuallyContains(t testing.TB, v string) {
+ t.Helper()
+ var bufContents strings.Builder
+ require.Eventuallyf(t, func() bool {
+ b.m.Lock()
+ defer b.m.Unlock()
+ if _, err := b.b.WriteTo(&bufContents); err != nil {
+ require.FailNowf(t, "Failed to copy from buffer",
+ "Error: %v", err)
+ }
+ return strings.Contains(bufContents.String(), v)
+ }, 2*time.Second, 20*time.Millisecond,
+ "Buffer did not contain %q\n============\n%s\n============",
+ v, &bufContents)
+}
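+
+// Example (illustrative): hand a *SafeBuffer to a goroutine under test as its
+// output writer, then call buf.RequireEventuallyContains(t, "ready") to poll
+// every 20ms (for up to 2s) until the expected output appears.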
diff --git a/pkg/utils/scan_suggest.go b/pkg/utils/scan_suggest.go
deleted file mode 100644
index 93059f80c48..00000000000
--- a/pkg/utils/scan_suggest.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- Copyright 2020 Docker Compose CLI authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package utils
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-
- pluginmanager "github.com/docker/cli/cli-plugins/manager"
- "github.com/docker/cli/cli/command"
- cliConfig "github.com/docker/cli/cli/config"
-)
-
-// ScanSuggestMsg display a message after a successful build to suggest use of `docker scan` command
-const ScanSuggestMsg = "Use 'docker scan' to run Snyk tests against images to find vulnerabilities and learn how to fix them"
-
-// DisplayScanSuggestMsg displlay a message suggesting users can scan new image
-func DisplayScanSuggestMsg() {
- if os.Getenv("DOCKER_SCAN_SUGGEST") == "false" {
- return
- }
- if !scanAvailable() {
- return
- }
- if scanAlreadyInvoked() {
- return
- }
- fmt.Fprintf(os.Stderr, "\n"+ScanSuggestMsg+"\n")
-}
-
-func scanAlreadyInvoked() bool {
- filename := filepath.Join(cliConfig.Dir(), "scan", "config.json")
- f, err := os.Stat(filename)
- if os.IsNotExist(err) {
- return false
- }
- if f.IsDir() { // should never happen, do not bother user with suggestion if something goes wrong
- return true
- }
- type scanOptin struct {
- Optin bool `json:"optin"`
- }
- data, err := ioutil.ReadFile(filename)
- if err != nil {
- return true
- }
- scanConfig := scanOptin{}
- err = json.Unmarshal(data, &scanConfig)
- if err != nil {
- return true
- }
- return scanConfig.Optin
-}
-
-func scanAvailable() bool {
- cli, err := command.NewDockerCli()
- if err != nil {
- return false
- }
- plugins, err := pluginmanager.ListPlugins(cli, nil)
- if err != nil {
- return false
- }
- for _, plugin := range plugins {
- if plugin.Name == "scan" {
- return true
- }
- }
- return false
-}
diff --git a/pkg/utils/set.go b/pkg/utils/set.go
new file mode 100644
index 00000000000..5a092d7c266
--- /dev/null
+++ b/pkg/utils/set.go
@@ -0,0 +1,93 @@
+/*
+
+ Copyright 2020 Docker Compose CLI authors
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+type Set[T comparable] map[T]struct{}
+
+func NewSet[T comparable](v ...T) Set[T] {
+ if len(v) == 0 {
+ return make(Set[T])
+ }
+
+ out := make(Set[T], len(v))
+ for i := range v {
+ out.Add(v[i])
+ }
+ return out
+}
+
+func (s Set[T]) Has(v T) bool {
+ _, ok := s[v]
+ return ok
+}
+
+func (s Set[T]) Add(v T) {
+ s[v] = struct{}{}
+}
+
+func (s Set[T]) AddAll(v ...T) {
+ for _, e := range v {
+ s[e] = struct{}{}
+ }
+}
+
+func (s Set[T]) Remove(v T) bool {
+ _, ok := s[v]
+ if ok {
+ delete(s, v)
+ }
+ return ok
+}
+
+func (s Set[T]) Clear() {
+ for v := range s {
+ delete(s, v)
+ }
+}
+
+func (s Set[T]) Elements() []T {
+ elements := make([]T, 0, len(s))
+ for v := range s {
+ elements = append(elements, v)
+ }
+ return elements
+}
+
+func (s Set[T]) RemoveAll(elements ...T) {
+ for _, e := range elements {
+ s.Remove(e)
+ }
+}
+
+func (s Set[T]) Diff(other Set[T]) Set[T] {
+ out := make(Set[T])
+ for k := range s {
+ if _, ok := other[k]; !ok {
+ out[k] = struct{}{}
+ }
+ }
+ return out
+}
+
+func (s Set[T]) Union(other Set[T]) Set[T] {
+ out := make(Set[T])
+ for k := range s {
+ out[k] = struct{}{}
+ }
+ for k := range other {
+ out[k] = struct{}{}
+ }
+ return out
+}
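+
+// Example (illustrative):
+//
+//	a := NewSet("web", "db")
+//	b := NewSet("db", "cache")
+//	a.Diff(b).Elements()  // ["web"]
+//	a.Union(b).Elements() // ["web", "db", "cache"], in unspecified order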
diff --git a/pkg/utils/set_test.go b/pkg/utils/set_test.go
new file mode 100644
index 00000000000..5bdd6cca3f9
--- /dev/null
+++ b/pkg/utils/set_test.go
@@ -0,0 +1,41 @@
+/*
+ Copyright 2022 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestSet_Has(t *testing.T) {
+ x := NewSet[string]("value")
+ require.True(t, x.Has("value"))
+ require.False(t, x.Has("VALUE"))
+}
+
+func TestSet_Diff(t *testing.T) {
+ a := NewSet[int](1, 2)
+ b := NewSet[int](2, 3)
+ require.ElementsMatch(t, []int{1}, a.Diff(b).Elements())
+ require.ElementsMatch(t, []int{3}, b.Diff(a).Elements())
+}
+
+func TestSet_Union(t *testing.T) {
+ a := NewSet[int](1, 2)
+ b := NewSet[int](2, 3)
+ require.ElementsMatch(t, []int{1, 2, 3}, a.Union(b).Elements())
+ require.ElementsMatch(t, []int{1, 2, 3}, b.Union(a).Elements())
+}
diff --git a/pkg/utils/stringutils.go b/pkg/utils/stringutils.go
index 01dd423870b..7135e91781c 100644
--- a/pkg/utils/stringutils.go
+++ b/pkg/utils/stringutils.go
@@ -21,18 +21,12 @@ import (
"strings"
)
-// StringContains check if an array contains a specific value
-func StringContains(array []string, needle string) bool {
- for _, val := range array {
- if val == needle {
- return true
- }
- }
- return false
-}
-
// StringToBool converts a string to a boolean ignoring errors
func StringToBool(s string) bool {
- b, _ := strconv.ParseBool(strings.ToLower(strings.TrimSpace(s)))
+ s = strings.ToLower(strings.TrimSpace(s))
+ if s == "y" {
+ return true
+ }
+ b, _ := strconv.ParseBool(s)
return b
}
diff --git a/pkg/utils/writer.go b/pkg/utils/writer.go
index 83f0bf5c3cb..1b4c8ca14a9 100644
--- a/pkg/utils/writer.go
+++ b/pkg/utils/writer.go
@@ -21,7 +21,7 @@ import (
"io"
)
-// GetWriter creates a io.Writer that will actually split by line and format by LogConsumer
+// GetWriter creates an io.Writer that will actually split by line and format by LogConsumer
func GetWriter(consumer func(string)) io.WriteCloser {
return &splitWriter{
buffer: bytes.Buffer{},
diff --git a/pkg/utils/writer_test.go b/pkg/utils/writer_test.go
index 7eedb844f17..bb7aed03752 100644
--- a/pkg/utils/writer_test.go
+++ b/pkg/utils/writer_test.go
@@ -22,18 +22,18 @@ import (
"gotest.tools/v3/assert"
)
+//nolint:errcheck
func TestSplitWriter(t *testing.T) {
var lines []string
w := GetWriter(func(line string) {
lines = append(lines, line)
})
- w.Write([]byte("h")) //nolint: errcheck
- w.Write([]byte("e")) //nolint: errcheck
- w.Write([]byte("l")) //nolint: errcheck
- w.Write([]byte("l")) //nolint: errcheck
- w.Write([]byte("o")) //nolint: errcheck
- w.Write([]byte("\n")) //nolint: errcheck
- w.Write([]byte("world!\n")) //nolint: errcheck
+ w.Write([]byte("h"))
+ w.Write([]byte("e"))
+ w.Write([]byte("l"))
+ w.Write([]byte("l"))
+ w.Write([]byte("o"))
+ w.Write([]byte("\n"))
+ w.Write([]byte("world!\n"))
assert.DeepEqual(t, lines, []string{"hello", "world!"})
-
}
diff --git a/pkg/watch/debounce.go b/pkg/watch/debounce.go
new file mode 100644
index 00000000000..e14da149b52
--- /dev/null
+++ b/pkg/watch/debounce.go
@@ -0,0 +1,73 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "context"
+ "time"
+
+ "github.com/docker/compose/v5/pkg/utils"
+ "github.com/jonboulle/clockwork"
+ "github.com/sirupsen/logrus"
+)
+
+const QuietPeriod = 500 * time.Millisecond
+
+// BatchDebounceEvents groups identical file events within a sliding time window and writes the results to the returned
+// channel.
+//
+// The returned channel is closed when the debouncer is stopped via context cancellation or by closing the input channel.
+func BatchDebounceEvents(ctx context.Context, clock clockwork.Clock, input <-chan FileEvent) <-chan []FileEvent {
+ out := make(chan []FileEvent)
+ go func() {
+ defer close(out)
+ seen := utils.Set[FileEvent]{}
+ flushEvents := func() {
+ if len(seen) == 0 {
+ return
+ }
+ logrus.Debugf("flush: %d events %s", len(seen), seen)
+
+ events := make([]FileEvent, 0, len(seen))
+ for e := range seen {
+ events = append(events, e)
+ }
+ out <- events
+ seen = utils.Set[FileEvent]{}
+ }
+
+ t := clock.NewTicker(QuietPeriod)
+ defer t.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-t.Chan():
+ flushEvents()
+ case e, ok := <-input:
+ if !ok {
+ // input channel was closed
+ flushEvents()
+ return
+ }
+ if _, ok := seen[e]; !ok {
+ seen.Add(e)
+ }
+ t.Reset(QuietPeriod)
+ }
+ }
+ }()
+ return out
+}
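+
+// Example (illustrative; rebuild is a placeholder): a consumer typically
+// ranges over the returned channel and reacts once per batch:
+//
+//	for batch := range BatchDebounceEvents(ctx, clockwork.NewRealClock(), watcher.Events()) {
+//		rebuild(batch)
+//	}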
diff --git a/pkg/watch/debounce_test.go b/pkg/watch/debounce_test.go
new file mode 100644
index 00000000000..fd1c40bbf27
--- /dev/null
+++ b/pkg/watch/debounce_test.go
@@ -0,0 +1,66 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "context"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "gotest.tools/v3/assert"
+)
+
+func Test_BatchDebounceEvents(t *testing.T) {
+ ch := make(chan FileEvent)
+ clock := clockwork.NewFakeClock()
+ ctx, stop := context.WithCancel(context.Background())
+ t.Cleanup(stop)
+
+ eventBatchCh := BatchDebounceEvents(ctx, clock, ch)
+ for i := 0; i < 100; i++ {
+ path := "/a"
+ if i%2 == 0 {
+ path = "/b"
+ }
+
+ ch <- FileEvent(path)
+ }
+ // we sent 100 events + the debouncer's ticker
+ err := clock.BlockUntilContext(ctx, 101)
+ assert.NilError(t, err)
+ clock.Advance(QuietPeriod)
+ select {
+ case batch := <-eventBatchCh:
+ slices.Sort(batch)
+ assert.Equal(t, len(batch), 2)
+ assert.Equal(t, batch[0], FileEvent("/a"))
+ assert.Equal(t, batch[1], FileEvent("/b"))
+ case <-time.After(50 * time.Millisecond):
+ t.Fatal("timed out waiting for events")
+ }
+ err = clock.BlockUntilContext(ctx, 1)
+ assert.NilError(t, err)
+ clock.Advance(QuietPeriod)
+
+ // there should only be a single batch
+ select {
+ case batch := <-eventBatchCh:
+ t.Fatalf("unexpected events: %v", batch)
+ case <-time.After(50 * time.Millisecond):
+ // channel is empty
+ }
+}
diff --git a/pkg/watch/dockerignore.go b/pkg/watch/dockerignore.go
new file mode 100644
index 00000000000..5dd5f343f97
--- /dev/null
+++ b/pkg/watch/dockerignore.go
@@ -0,0 +1,170 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/types"
+ "github.com/docker/compose/v5/internal/paths"
+ "github.com/moby/patternmatcher"
+ "github.com/moby/patternmatcher/ignorefile"
+)
+
+type dockerPathMatcher struct {
+ repoRoot string
+ matcher *patternmatcher.PatternMatcher
+}
+
+func (i dockerPathMatcher) Matches(f string) (bool, error) {
+ if !filepath.IsAbs(f) {
+ f = filepath.Join(i.repoRoot, f)
+ }
+ return i.matcher.MatchesOrParentMatches(f)
+}
+
+func (i dockerPathMatcher) MatchesEntireDir(f string) (bool, error) {
+ matches, err := i.Matches(f)
+ if !matches || err != nil {
+ return matches, err
+ }
+
+ // We match the dir, but we might exclude files underneath it.
+ if i.matcher.Exclusions() {
+ for _, pattern := range i.matcher.Patterns() {
+ if !pattern.Exclusion() {
+ continue
+ }
+ if paths.IsChild(f, pattern.String()) {
+ // Found an exclusion match -- we don't match this whole dir
+ return false, nil
+ }
+ }
+ return true, nil
+ }
+ return true, nil
+}
+
+func LoadDockerIgnore(build *types.BuildConfig) (PathMatcher, error) {
+ if build == nil {
+ return EmptyMatcher{}, nil
+ }
+ repoRoot := build.Context
+ absRoot, err := filepath.Abs(repoRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ // first try Dockerfile-specific ignore-file
+ f, err := os.Open(filepath.Join(repoRoot, build.Dockerfile+".dockerignore"))
+ if os.IsNotExist(err) {
+ // defaults to a global .dockerignore
+ f, err = os.Open(filepath.Join(repoRoot, ".dockerignore"))
+ if os.IsNotExist(err) {
+ return NewDockerPatternMatcher(repoRoot, nil)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ patterns, err := readDockerignorePatterns(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewDockerPatternMatcher(absRoot, patterns)
+}
+
+// Make all the patterns use absolute paths.
+func absPatterns(absRoot string, patterns []string) []string {
+ absPatterns := make([]string, 0, len(patterns))
+ for _, p := range patterns {
+ // The pattern parsing here is loosely adapted from fileutils' NewPatternMatcher
+ p = strings.TrimSpace(p)
+ if p == "" {
+ continue
+ }
+ p = filepath.Clean(p)
+
+ pPath := p
+ isExclusion := false
+ if p[0] == '!' {
+ pPath = p[1:]
+ isExclusion = true
+ }
+
+ if !filepath.IsAbs(pPath) {
+ pPath = filepath.Join(absRoot, pPath)
+ }
+ absPattern := pPath
+ if isExclusion {
+ absPattern = fmt.Sprintf("!%s", pPath)
+ }
+ absPatterns = append(absPatterns, absPattern)
+ }
+ return absPatterns
+}
+
+func NewDockerPatternMatcher(repoRoot string, patterns []string) (*dockerPathMatcher, error) {
+ absRoot, err := filepath.Abs(repoRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if "*" is present in patterns
+ hasAllPattern := slices.Contains(patterns, "*")
+ if hasAllPattern {
+ // Remove all non-exclusion patterns (those that don't start with '!')
+ patterns = slices.DeleteFunc(patterns, func(p string) bool {
+ return p != "" && p[0] != '!' // Only keep exclusion patterns
+ })
+ }
+
+ pm, err := patternmatcher.New(absPatterns(absRoot, patterns))
+ if err != nil {
+ return nil, err
+ }
+
+ return &dockerPathMatcher{
+ repoRoot: absRoot,
+ matcher: pm,
+ }, nil
+}
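+
+// Illustrative example: under /repo, patterns ["*", "!file.txt"] reduce to
+// just ["!/repo/file.txt"], matching the "Wildcard with exclusion" case in
+// the tests below.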
+
+func readDockerignorePatterns(r io.Reader) ([]string, error) {
+ patterns, err := ignorefile.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("error reading .dockerignore: %w", err)
+ }
+ return patterns, nil
+}
+
+func DockerIgnoreTesterFromContents(repoRoot string, contents string) (*dockerPathMatcher, error) {
+ patterns, err := ignorefile.ReadAll(strings.NewReader(contents))
+ if err != nil {
+ return nil, fmt.Errorf("error reading .dockerignore: %w", err)
+ }
+
+ return NewDockerPatternMatcher(repoRoot, patterns)
+}
diff --git a/pkg/watch/dockerignore_test.go b/pkg/watch/dockerignore_test.go
new file mode 100644
index 00000000000..6e88a857476
--- /dev/null
+++ b/pkg/watch/dockerignore_test.go
@@ -0,0 +1,108 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "testing"
+)
+
+func TestNewDockerPatternMatcher(t *testing.T) {
+ tests := []struct {
+ name string
+ repoRoot string
+ patterns []string
+ expectedErr bool
+ expectedRoot string
+ expectedPat []string
+ }{
+ {
+ name: "Basic patterns without wildcard",
+ repoRoot: "/repo",
+ patterns: []string{"dir1/", "file.txt"},
+ expectedErr: false,
+ expectedRoot: "/repo",
+ expectedPat: []string{"/repo/dir1", "/repo/file.txt"},
+ },
+ {
+ name: "Patterns with exclusion",
+ repoRoot: "/repo",
+ patterns: []string{"dir1/", "!file.txt"},
+ expectedErr: false,
+ expectedRoot: "/repo",
+ expectedPat: []string{"/repo/dir1", "!/repo/file.txt"},
+ },
+ {
+ name: "Wildcard with exclusion",
+ repoRoot: "/repo",
+ patterns: []string{"*", "!file.txt"},
+ expectedErr: false,
+ expectedRoot: "/repo",
+ expectedPat: []string{"!/repo/file.txt"},
+ },
+ {
+ name: "No patterns",
+ repoRoot: "/repo",
+ patterns: []string{},
+ expectedErr: false,
+ expectedRoot: "/repo",
+ expectedPat: nil,
+ },
+ {
+ name: "Only exclusion pattern",
+ repoRoot: "/repo",
+ patterns: []string{"!file.txt"},
+ expectedErr: false,
+ expectedRoot: "/repo",
+ expectedPat: []string{"!/repo/file.txt"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Call the function with the test data
+ matcher, err := NewDockerPatternMatcher(tt.repoRoot, tt.patterns)
+
+ // Check if we expect an error
+ if (err != nil) != tt.expectedErr {
+ t.Fatalf("expected error: %v, got: %v", tt.expectedErr, err)
+ }
+
+ // If no error is expected, check the output
+ if !tt.expectedErr {
+ if matcher.repoRoot != tt.expectedRoot {
+ t.Errorf("expected root: %v, got: %v", tt.expectedRoot, matcher.repoRoot)
+ }
+
+ // Compare patterns
+ actualPatterns := matcher.matcher.Patterns()
+ if len(actualPatterns) != len(tt.expectedPat) {
+ t.Errorf("expected patterns length: %v, got: %v", len(tt.expectedPat), len(actualPatterns))
+ }
+
+ for i, expectedPat := range tt.expectedPat {
+ actualPatternStr := actualPatterns[i].String()
+ if actualPatterns[i].Exclusion() {
+ actualPatternStr = "!" + actualPatternStr
+ }
+ if actualPatternStr != expectedPat {
+ t.Errorf("expected pattern: %v, got: %v", expectedPat, actualPatterns[i])
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/pkg/watch/ephemeral.go b/pkg/watch/ephemeral.go
new file mode 100644
index 00000000000..77589a9413d
--- /dev/null
+++ b/pkg/watch/ephemeral.go
@@ -0,0 +1,59 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+// EphemeralPathMatcher filters out spurious changes that we don't want to
+// rebuild on, like IDE temp/lock files.
+//
+// This isn't an ideal solution. In an ideal world, the user would put
+// everything to ignore in their tiltignore/dockerignore files. This is a
+// stop-gap so they don't have a terrible experience if those files aren't
+// there or aren't in the right places.
+//
+// NOTE: The underlying `patternmatcher` is NOT always Goroutine-safe, so
+// this is not a singleton; we create an instance for each watcher currently.
+func EphemeralPathMatcher() PathMatcher {
+ golandPatterns := []string{"**/*___jb_old___", "**/*___jb_tmp___", "**/.idea/**"}
+ emacsPatterns := []string{"**/.#*", "**/#*#"}
+ // if .swp is taken (presumably because multiple vims are running in that dir),
+ // vim will go with .swo, .swn, etc, and then even .svz, .svy!
+ // https://github.com/vim/vim/blob/ea781459b9617aa47335061fcc78403495260315/src/memline.c#L5076
+ // ignoring .sw? seems dangerous, since things like .swf or .swi exist, but ignoring the first few
+ // seems safe and should catch most cases
+ vimPatterns := []string{"**/4913", "**/*~", "**/.*.swp", "**/.*.swx", "**/.*.swo", "**/.*.swn"}
+ // kate (the default text editor for KDE) uses a file similar to Vim's .swp
+ // files, but it doesn't have the "incrementing" character problem mentioned
+ // above
+ katePatterns := []string{"**/.*.kate-swp"}
+ // go stdlib creates tmpfiles to determine umask for setting permissions
+ // during file creation; they are then immediately deleted
+ // https://github.com/golang/go/blob/0b5218cf4e3e5c17344ea113af346e8e0836f6c4/src/cmd/go/internal/work/exec.go#L1764
+ goPatterns := []string{"**/*-go-tmp-umask"}
+
+ var allPatterns []string
+ allPatterns = append(allPatterns, golandPatterns...)
+ allPatterns = append(allPatterns, emacsPatterns...)
+ allPatterns = append(allPatterns, vimPatterns...)
+ allPatterns = append(allPatterns, katePatterns...)
+ allPatterns = append(allPatterns, goPatterns...)
+
+ matcher, err := NewDockerPatternMatcher("/", allPatterns)
+ if err != nil {
+ panic(err)
+ }
+ return matcher
+}
diff --git a/pkg/watch/ephemeral_test.go b/pkg/watch/ephemeral_test.go
new file mode 100644
index 00000000000..9f7b81819be
--- /dev/null
+++ b/pkg/watch/ephemeral_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2023 Docker Compose CLI authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package watch_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/docker/compose/v5/pkg/watch"
+)
+
+func TestEphemeralPathMatcher(t *testing.T) {
+ ignored := []string{
+ ".file.txt.swp",
+ "/path/file.txt~",
+ "/home/moby/proj/.idea/modules.xml",
+ ".#file.txt",
+ "#file.txt#",
+ "/dir/.file.txt.kate-swp",
+ "/go/app/1234-go-tmp-umask",
+ }
+ matcher := watch.EphemeralPathMatcher()
+ for _, p := range ignored {
+ ok, err := matcher.Matches(p)
+ require.NoErrorf(t, err, "Matching %s", p)
+ assert.Truef(t, ok, "Path %s should have matched", p)
+ }
+
+ const includedPath = "normal.txt"
+ ok, err := matcher.Matches(includedPath)
+ require.NoErrorf(t, err, "Matching %s", includedPath)
+ assert.Falsef(t, ok, "Path %s should NOT have matched", includedPath)
+}
diff --git a/pkg/watch/notify.go b/pkg/watch/notify.go
new file mode 100644
index 00000000000..d63f5caf28b
--- /dev/null
+++ b/pkg/watch/notify.go
@@ -0,0 +1,136 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "expvar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+var numberOfWatches = expvar.NewInt("watch.naive.numberOfWatches")
+
+type FileEvent string
+
+func NewFileEvent(p string) FileEvent {
+ if !filepath.IsAbs(p) {
+ panic(fmt.Sprintf("NewFileEvent only accepts absolute paths. Actual: %s", p))
+ }
+ return FileEvent(p)
+}
+
+type Notify interface {
+ // Start watching the paths set at init time
+ Start() error
+
+ // Stop watching and close all channels
+ Close() error
+
+ // A channel to read off incoming file changes
+ Events() chan FileEvent
+
+ // A channel to read off show-stopping errors
+ Errors() chan error
+}
+
+// When we specify directories to watch, we often want to
+// ignore some subset of the files under those directories.
+//
+// For example:
+// - Watch /src/repo, but ignore /src/repo/.git
+// - Watch /src/repo, but ignore everything in /src/repo/bazel-bin except /src/repo/bazel-bin/app-binary
+//
+// The PathMatcher interface helps us manage these ignores.
+type PathMatcher interface {
+ Matches(file string) (bool, error)
+
+ // If this matches the entire dir, we can often optimize filetree walks a bit.
+ MatchesEntireDir(file string) (bool, error)
+}
+
+// AnyMatcher is a PathMatcher to match any path
+type AnyMatcher struct{}
+
+func (AnyMatcher) Matches(f string) (bool, error) { return true, nil }
+func (AnyMatcher) MatchesEntireDir(f string) (bool, error) { return true, nil }
+
+var _ PathMatcher = AnyMatcher{}
+
+// EmptyMatcher is a PathMatcher to match no path
+type EmptyMatcher struct{}
+
+func (EmptyMatcher) Matches(f string) (bool, error) { return false, nil }
+func (EmptyMatcher) MatchesEntireDir(f string) (bool, error) { return false, nil }
+
+var _ PathMatcher = EmptyMatcher{}
+
+func NewWatcher(paths []string) (Notify, error) {
+ return newWatcher(paths)
+}
+
+const WindowsBufferSizeEnvVar = "COMPOSE_WATCH_WINDOWS_BUFFER_SIZE"
+
+const defaultBufferSize int = 65536
+
+func DesiredWindowsBufferSize() int {
+ envVar := os.Getenv(WindowsBufferSizeEnvVar)
+ if envVar != "" {
+ size, err := strconv.Atoi(envVar)
+ if err == nil {
+ return size
+ }
+ }
+ return defaultBufferSize
+}
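+
+// Example (illustrative): `COMPOSE_WATCH_WINDOWS_BUFFER_SIZE=131072` doubles
+// the default 64 KiB watch buffer on Windows; a non-numeric value silently
+// falls back to the default.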
+
+type CompositePathMatcher struct {
+ Matchers []PathMatcher
+}
+
+func NewCompositeMatcher(matchers ...PathMatcher) PathMatcher {
+ if len(matchers) == 0 {
+ return EmptyMatcher{}
+ }
+ return CompositePathMatcher{Matchers: matchers}
+}
+
+func (c CompositePathMatcher) Matches(f string) (bool, error) {
+ for _, t := range c.Matchers {
+ ret, err := t.Matches(f)
+ if err != nil {
+ return false, err
+ }
+ if ret {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (c CompositePathMatcher) MatchesEntireDir(f string) (bool, error) {
+ for _, t := range c.Matchers {
+ matches, err := t.MatchesEntireDir(f)
+ if matches || err != nil {
+ return matches, err
+ }
+ }
+ return false, nil
+}
+
+var _ PathMatcher = CompositePathMatcher{}
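+
+// Example (illustrative, ignoreMatcher being e.g. the result of LoadDockerIgnore):
+// NewCompositeMatcher(EphemeralPathMatcher(), ignoreMatcher) matches a path if
+// any of its matchers does, and with no matchers at all it degenerates to
+// EmptyMatcher.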
diff --git a/pkg/watch/notify_test.go b/pkg/watch/notify_test.go
new file mode 100644
index 00000000000..9b68c4d0501
--- /dev/null
+++ b/pkg/watch/notify_test.go
@@ -0,0 +1,657 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// Each implementation of the notify interface should have the same basic
+// behavior.
+
+func TestWindowsBufferSize(t *testing.T) {
+ orig := os.Getenv(WindowsBufferSizeEnvVar)
+ defer os.Setenv(WindowsBufferSizeEnvVar, orig) //nolint:errcheck
+
+ err := os.Setenv(WindowsBufferSizeEnvVar, "")
+ require.NoError(t, err)
+ assert.Equal(t, defaultBufferSize, DesiredWindowsBufferSize())
+
+ err = os.Setenv(WindowsBufferSizeEnvVar, "a")
+ require.NoError(t, err)
+ assert.Equal(t, defaultBufferSize, DesiredWindowsBufferSize())
+
+ err = os.Setenv(WindowsBufferSizeEnvVar, "10")
+ require.NoError(t, err)
+ assert.Equal(t, 10, DesiredWindowsBufferSize())
+}
+
+func TestNoEvents(t *testing.T) {
+ f := newNotifyFixture(t)
+ f.assertEvents()
+}
+
+func TestNoWatches(t *testing.T) {
+ f := newNotifyFixture(t)
+ f.paths = nil
+ f.rebuildWatcher()
+ f.assertEvents()
+}
+
+func TestEventOrdering(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ // https://qualapps.blogspot.com/2010/05/understanding-readdirectorychangesw_19.html
+ t.Skip("Windows doesn't make great guarantees about duplicate/out-of-order events")
+ return
+ }
+ f := newNotifyFixture(t)
+
+ count := 8
+ dirs := make([]string, count)
+ for i := range dirs {
+ dir := f.TempDir("watched")
+ dirs[i] = dir
+ f.watch(dir)
+ }
+
+ f.fsync()
+ f.events = nil
+
+ var expected []string
+ for i, dir := range dirs {
+ base := fmt.Sprintf("%d.txt", i)
+ p := filepath.Join(dir, base)
+ err := os.WriteFile(p, []byte(base), os.FileMode(0o777))
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected = append(expected, filepath.Join(dir, base))
+ }
+
+ f.assertEvents(expected...)
+}
+
+// Simulate a git branch switch that creates a bunch
+// of directories, creates files in them, then deletes
+// them all quickly. Make sure there are no errors.
+func TestGitBranchSwitch(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ count := 10
+ dirs := make([]string, count)
+ for i := range dirs {
+ dir := f.TempDir("watched")
+ dirs[i] = dir
+ f.watch(dir)
+ }
+
+ f.fsync()
+ f.events = nil
+
+ // consume all the events in the background
+ ctx, cancel := context.WithCancel(context.Background())
+ done := f.consumeEventsInBackground(ctx)
+
+ for i, dir := range dirs {
+ for j := 0; j < count; j++ {
+ base := fmt.Sprintf("x/y/dir-%d/x.txt", j)
+ p := filepath.Join(dir, base)
+ f.WriteFile(p, "contents")
+ }
+
+ if i != 0 {
+ err := os.RemoveAll(dir)
+ require.NoError(t, err)
+ }
+ }
+
+ cancel()
+ err := <-done
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.fsync()
+ f.events = nil
+
+ // Make sure the watch on the first dir still works.
+ dir := dirs[0]
+ path := filepath.Join(dir, "change")
+
+ f.WriteFile(path, "hello\n")
+ f.fsync()
+
+ f.assertEvents(path)
+
+ // Make sure there are no errors in the out stream
+ assert.Empty(t, f.out.String())
+}
+
+func TestWatchesAreRecursive(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+
+ // add a sub directory
+ subPath := filepath.Join(root, "sub")
+ f.MkdirAll(subPath)
+
+ // watch parent
+ f.watch(root)
+
+ f.fsync()
+ f.events = nil
+ // change sub directory
+ changeFilePath := filepath.Join(subPath, "change")
+ f.WriteFile(changeFilePath, "change")
+
+ f.assertEvents(changeFilePath)
+}
+
+func TestNewDirectoriesAreRecursivelyWatched(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+
+ // watch parent
+ f.watch(root)
+ f.fsync()
+ f.events = nil
+
+ // add a sub directory
+ subPath := filepath.Join(root, "sub")
+ f.MkdirAll(subPath)
+
+ // change something inside sub directory
+ changeFilePath := filepath.Join(subPath, "change")
+ file, err := os.OpenFile(changeFilePath, os.O_RDONLY|os.O_CREATE, 0o666)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _ = file.Close()
+ f.assertEvents(subPath, changeFilePath)
+}
+
+func TestWatchNonExistentPath(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+ path := filepath.Join(root, "change")
+
+ f.watch(path)
+ f.fsync()
+
+ d1 := "hello\ngo\n"
+ f.WriteFile(path, d1)
+ f.assertEvents(path)
+}
+
+func TestWatchNonExistentPathDoesNotFireSiblingEvent(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+ watchedFile := filepath.Join(root, "a.txt")
+ unwatchedSibling := filepath.Join(root, "b.txt")
+
+ f.watch(watchedFile)
+ f.fsync()
+
+ d1 := "hello\ngo\n"
+ f.WriteFile(unwatchedSibling, d1)
+ f.assertEvents()
+}
+
+func TestRemove(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+ path := filepath.Join(root, "change")
+
+ d1 := "hello\ngo\n"
+ f.WriteFile(path, d1)
+
+ f.watch(path)
+ f.fsync()
+ f.events = nil
+ err := os.Remove(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.assertEvents(path)
+}
+
+func TestRemoveAndAddBack(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ path := filepath.Join(f.paths[0], "change")
+
+ d1 := []byte("hello\ngo\n")
+ err := os.WriteFile(path, d1, 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.watch(path)
+ f.assertEvents(path)
+
+ err = os.Remove(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents(path)
+ f.events = nil
+
+ err = os.WriteFile(path, d1, 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents(path)
+}
+
+func TestSingleFile(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+ path := filepath.Join(root, "change")
+
+ d1 := "hello\ngo\n"
+ f.WriteFile(path, d1)
+
+ f.watch(path)
+ f.fsync()
+
+ d2 := []byte("hello\nworld\n")
+ err := os.WriteFile(path, d2, 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.assertEvents(path)
+}
+
+func TestWriteBrokenLink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("no user-space symlinks on windows")
+ }
+ f := newNotifyFixture(t)
+
+ link := filepath.Join(f.paths[0], "brokenLink")
+ missingFile := filepath.Join(f.paths[0], "missingFile")
+ err := os.Symlink(missingFile, link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents(link)
+}
+
+func TestWriteGoodLink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("no user-space symlinks on windows")
+ }
+ f := newNotifyFixture(t)
+
+ goodFile := filepath.Join(f.paths[0], "goodFile")
+ err := os.WriteFile(goodFile, []byte("hello"), 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ link := filepath.Join(f.paths[0], "goodFileSymlink")
+ err = os.Symlink(goodFile, link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents(goodFile, link)
+}
+
+func TestWatchBrokenLink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("no user-space symlinks on windows")
+ }
+ f := newNotifyFixture(t)
+
+ newRoot, err := NewDir(t.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err := newRoot.TearDown()
+ if err != nil {
+ fmt.Printf("error tearing down temp dir: %v\n", err)
+ }
+ }()
+
+ link := filepath.Join(newRoot.Path(), "brokenLink")
+ missingFile := filepath.Join(newRoot.Path(), "missingFile")
+ err = os.Symlink(missingFile, link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.watch(newRoot.Path())
+ err = os.Remove(link)
+ require.NoError(t, err)
+ f.assertEvents(link)
+}
+
+func TestMoveAndReplace(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.TempDir("root")
+ file := filepath.Join(root, "myfile")
+ f.WriteFile(file, "hello")
+
+ f.watch(file)
+ tmpFile := filepath.Join(root, ".myfile.swp")
+ f.WriteFile(tmpFile, "world")
+
+ err := os.Rename(tmpFile, file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents(file)
+}
+
+func TestWatchBothDirAndFile(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ dir := f.JoinPath("foo")
+ fileA := f.JoinPath("foo", "a")
+ fileB := f.JoinPath("foo", "b")
+ f.WriteFile(fileA, "a")
+ f.WriteFile(fileB, "b")
+
+ f.watch(fileA)
+ f.watch(dir)
+ f.fsync()
+ f.events = nil
+
+ f.WriteFile(fileB, "b-new")
+ f.assertEvents(fileB)
+}
+
+func TestWatchNonexistentFileInNonexistentDirectoryCreatedSimultaneously(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.JoinPath("root")
+ err := os.Mkdir(root, 0o777)
+ if err != nil {
+ t.Fatal(err)
+ }
+ file := f.JoinPath("root", "parent", "a")
+
+ f.watch(file)
+ f.fsync()
+ f.events = nil
+ f.WriteFile(file, "hello")
+ f.assertEvents(file)
+}
+
+func TestWatchNonexistentDirectory(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.JoinPath("root")
+ err := os.Mkdir(root, 0o777)
+ if err != nil {
+ t.Fatal(err)
+ }
+ parent := f.JoinPath("parent")
+ file := f.JoinPath("parent", "a")
+
+ f.watch(parent)
+ f.fsync()
+ f.events = nil
+
+ err = os.Mkdir(parent, 0o777)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // for directories that were the root of an Add, we don't report creation, cf. watcher_darwin.go
+ f.assertEvents()
+
+ f.events = nil
+ f.WriteFile(file, "hello")
+
+ f.assertEvents(file)
+}
+
+func TestWatchNonexistentFileInNonexistentDirectory(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.JoinPath("root")
+ err := os.Mkdir(root, 0o777)
+ if err != nil {
+ t.Fatal(err)
+ }
+ parent := f.JoinPath("parent")
+ file := f.JoinPath("parent", "a")
+
+ f.watch(file)
+ f.assertEvents()
+
+ err = os.Mkdir(parent, 0o777)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ f.assertEvents()
+ f.WriteFile(file, "hello")
+ f.assertEvents(file)
+}
+
+func TestWatchCountInnerFile(t *testing.T) {
+ f := newNotifyFixture(t)
+
+ root := f.paths[0]
+ a := f.JoinPath(root, "a")
+ b := f.JoinPath(a, "b")
+ file := f.JoinPath(b, "bigFile")
+ f.WriteFile(file, "hello")
+ f.assertEvents(a, b, file)
+
+ expectedWatches := 3
+ if isRecursiveWatcher() {
+ expectedWatches = 1
+ }
+ assert.Equal(t, expectedWatches, int(numberOfWatches.Value()))
+}
+
+func isRecursiveWatcher() bool {
+ return runtime.GOOS == "darwin" || runtime.GOOS == "windows"
+}
+
+type notifyFixture struct {
+ ctx context.Context
+ cancel func()
+ out *bytes.Buffer
+ *TempDirFixture
+ notify Notify
+ paths []string
+ events []FileEvent
+}
+
+func newNotifyFixture(t *testing.T) *notifyFixture {
+ out := bytes.NewBuffer(nil)
+ ctx, cancel := context.WithCancel(context.Background())
+ nf := &notifyFixture{
+ ctx: ctx,
+ cancel: cancel,
+ TempDirFixture: NewTempDirFixture(t),
+ paths: []string{},
+ out: out,
+ }
+ nf.watch(nf.TempDir("watched"))
+ t.Cleanup(nf.tearDown)
+ return nf
+}
+
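+// watch adds path to the fixture's watch list and rebuilds the watcher so the
+// new path takes effect immediately.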
+func (f *notifyFixture) watch(path string) {
+ f.paths = append(f.paths, path)
+ f.rebuildWatcher()
+}
+
+func (f *notifyFixture) rebuildWatcher() {
+ // sync any outstanding events and close the old watcher
+ if f.notify != nil {
+ f.fsync()
+ f.closeWatcher()
+ }
+
+ // create a new watcher
+ notify, err := NewWatcher(f.paths)
+ if err != nil {
+ f.T().Fatal(err)
+ }
+ f.notify = notify
+ err = f.notify.Start()
+ if err != nil {
+ f.T().Fatal(err)
+ }
+}
+
+func (f *notifyFixture) assertEvents(expected ...string) {
+ f.fsync()
+ if runtime.GOOS == "windows" {
+ // NOTE(nick): It's unclear to me why an extra fsync() helps
+ // here, but it makes the I/O way more predictable.
+ f.fsync()
+ }
+
+ if len(f.events) != len(expected) {
+ f.T().Fatalf("Got %d events (expected %d): %v %v", len(f.events), len(expected), f.events, expected)
+ }
+
+ for i, actual := range f.events {
+ e := FileEvent(expected[i])
+ if actual != e {
+ f.T().Fatalf("Got event %v (expected %v)", actual, e)
+ }
+ }
+}
+
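+// consumeEventsInBackground discards watcher events until the fixture or the given
+// context is cancelled, forwarding the first watcher error (if any) on the returned channel.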
+func (f *notifyFixture) consumeEventsInBackground(ctx context.Context) chan error {
+ done := make(chan error)
+ go func() {
+ for {
+ select {
+ case <-f.ctx.Done():
+ close(done)
+ return
+ case <-ctx.Done():
+ close(done)
+ return
+ case err := <-f.notify.Errors():
+ done <- err
+ close(done)
+ return
+ case <-f.notify.Events():
+ }
+ }
+ }()
+ return done
+}
+
+func (f *notifyFixture) fsync() {
+ f.fsyncWithRetryCount(3)
+}
+
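+// fsyncWithRetryCount writes a throwaway sync file into the first watched path and
+// collects events until the sync file's own event arrives, which guarantees that all
+// earlier events have been observed. On timeout it retries up to retryCount times.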
+func (f *notifyFixture) fsyncWithRetryCount(retryCount int) {
+ if len(f.paths) == 0 {
+ return
+ }
+
+ syncPathBase := fmt.Sprintf("sync-%d.txt", time.Now().UnixNano())
+ syncPath := filepath.Join(f.paths[0], syncPathBase)
+ anySyncPath := filepath.Join(f.paths[0], "sync-")
+	timeout := time.After(250 * time.Millisecond)
+
+ f.WriteFile(syncPath, time.Now().String())
+
+F:
+ for {
+ select {
+ case <-f.ctx.Done():
+ return
+ case err := <-f.notify.Errors():
+ f.T().Fatal(err)
+
+ case event := <-f.notify.Events():
+ if strings.Contains(string(event), syncPath) {
+ break F
+ }
+ if strings.Contains(string(event), anySyncPath) {
+ continue
+ }
+
+ // Don't bother tracking duplicate changes to the same path
+ // for testing.
+ if len(f.events) > 0 && f.events[len(f.events)-1] == event {
+ continue
+ }
+
+ f.events = append(f.events, event)
+
+ case <-timeout:
+ if retryCount <= 0 {
+ f.T().Fatalf("fsync: timeout")
+ } else {
+ f.fsyncWithRetryCount(retryCount - 1)
+ }
+ return
+ }
+ }
+}
+
+func (f *notifyFixture) closeWatcher() {
+ notify := f.notify
+ err := notify.Close()
+ if err != nil {
+ f.T().Fatal(err)
+ }
+
+ // drain channels from watcher
+ go func() {
+ for range notify.Events() {
+ }
+ }()
+
+ go func() {
+ for range notify.Errors() {
+ }
+ }()
+}
+
+func (f *notifyFixture) tearDown() {
+ f.cancel()
+ f.closeWatcher()
+ numberOfWatches.Set(0)
+}
diff --git a/pkg/watch/paths.go b/pkg/watch/paths.go
new file mode 100644
index 00000000000..c0c893cd995
--- /dev/null
+++ b/pkg/watch/paths.go
@@ -0,0 +1,41 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
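+// greatestExistingAncestor walks up from path until it reaches a file or directory
+// that exists on disk; watching the filesystem root is rejected outright.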
+func greatestExistingAncestor(path string) (string, error) {
+ if path == string(filepath.Separator) ||
+ path == fmt.Sprintf("%s%s", filepath.VolumeName(path), string(filepath.Separator)) {
+ return "", fmt.Errorf("cannot watch root directory")
+ }
+
+ _, err := os.Stat(path)
+ if err != nil && !os.IsNotExist(err) {
+ return "", fmt.Errorf("os.Stat(%q): %w", path, err)
+ }
+
+ if os.IsNotExist(err) {
+ return greatestExistingAncestor(filepath.Dir(path))
+ }
+
+ return path, nil
+}
diff --git a/pkg/watch/paths_test.go b/pkg/watch/paths_test.go
new file mode 100644
index 00000000000..72b707e5163
--- /dev/null
+++ b/pkg/watch/paths_test.go
@@ -0,0 +1,44 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGreatestExistingAncestor(t *testing.T) {
+ f := NewTempDirFixture(t)
+
+ p, err := greatestExistingAncestor(f.Path())
+ require.NoError(t, err)
+ assert.Equal(t, f.Path(), p)
+
+ p, err = greatestExistingAncestor(f.JoinPath("missing"))
+ require.NoError(t, err)
+ assert.Equal(t, f.Path(), p)
+
+ missingTopLevel := "/missingDir/a/b/c"
+ if runtime.GOOS == "windows" {
+ missingTopLevel = "C:\\missingDir\\a\\b\\c"
+ }
+ _, err = greatestExistingAncestor(missingTopLevel)
+ assert.Contains(t, err.Error(), "cannot watch root directory")
+}
diff --git a/pkg/watch/temp.go b/pkg/watch/temp.go
new file mode 100644
index 00000000000..011f547c1b4
--- /dev/null
+++ b/pkg/watch/temp.go
@@ -0,0 +1,88 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// TempDir holds a temp directory and allows easy access to new temp directories.
+type TempDir struct {
+ dir string
+}
+
+// NewDir creates a new TempDir in the default location (typically $TMPDIR)
+func NewDir(prefix string) (*TempDir, error) {
+ return NewDirAtRoot("", prefix)
+}
+
+// NewDirAtRoot creates a new TempDir at the given root.
+func NewDirAtRoot(root, prefix string) (*TempDir, error) {
+ tmpDir, err := os.MkdirTemp(root, prefix)
+ if err != nil {
+ return nil, err
+ }
+
+ realTmpDir, err := filepath.EvalSymlinks(tmpDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return &TempDir{dir: realTmpDir}, nil
+}
+
+// NewDirAtSlashTmp creates a new TempDir at /tmp
+func NewDirAtSlashTmp(prefix string) (*TempDir, error) {
+ fullyResolvedPath, err := filepath.EvalSymlinks("/tmp")
+ if err != nil {
+ return nil, err
+ }
+ return NewDirAtRoot(fullyResolvedPath, prefix)
+}
+
+// d.NewDir creates a new TempDir under d
+func (d *TempDir) NewDir(prefix string) (*TempDir, error) {
+ d2, err := os.MkdirTemp(d.dir, prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &TempDir{d2}, nil
+}
+
+func (d *TempDir) NewDeterministicDir(name string) (*TempDir, error) {
+ d2 := filepath.Join(d.dir, name)
+ err := os.Mkdir(d2, 0o700)
+ if os.IsExist(err) {
+ return nil, err
+ } else if err != nil {
+ return nil, err
+ }
+ return &TempDir{d2}, nil
+}
+
+func (d *TempDir) TearDown() error {
+ return os.RemoveAll(d.dir)
+}
+
+func (d *TempDir) Path() string {
+ return d.dir
+}
+
+// Possible extensions:
+// temp file
+// named directories or files (e.g., we know we want one git repo for our object, but it should be temporary)
diff --git a/pkg/watch/temp_dir_fixture.go b/pkg/watch/temp_dir_fixture.go
new file mode 100644
index 00000000000..a0855e875c8
--- /dev/null
+++ b/pkg/watch/temp_dir_fixture.go
@@ -0,0 +1,199 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type TempDirFixture struct {
+ t testing.TB
+ dir *TempDir
+ oldDir string
+}
+
+// everything not listed in this character class will get replaced by _, so that it's a safe filename
+var sanitizeForFilenameRe = regexp.MustCompile("[^a-zA-Z0-9.]")
+
+func SanitizeFileName(name string) string {
+ return sanitizeForFilenameRe.ReplaceAllString(name, "_")
+}
+
+func NewTempDirFixture(t testing.TB) *TempDirFixture {
+ dir, err := NewDir(SanitizeFileName(t.Name()))
+ if err != nil {
+ t.Fatalf("Error making temp dir: %v", err)
+ }
+
+ ret := &TempDirFixture{
+ t: t,
+ dir: dir,
+ }
+
+ t.Cleanup(ret.tearDown)
+
+ return ret
+}
+
+func (f *TempDirFixture) T() testing.TB {
+ return f.t
+}
+
+func (f *TempDirFixture) Path() string {
+ return f.dir.Path()
+}
+
+func (f *TempDirFixture) Chdir() {
+ cwd, err := os.Getwd()
+ if err != nil {
+ f.t.Fatal(err)
+ }
+
+ f.oldDir = cwd
+
+ err = os.Chdir(f.Path())
+ if err != nil {
+ f.t.Fatal(err)
+ }
+}
+
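+// JoinPath joins the given elements beneath the fixture's temp dir; an absolute
+// first element is only accepted if it already points inside the temp dir.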
+func (f *TempDirFixture) JoinPath(path ...string) string {
+ p := []string{}
+ isAbs := len(path) > 0 && filepath.IsAbs(path[0])
+ if isAbs {
+ if !strings.HasPrefix(path[0], f.Path()) {
+			f.t.Fatalf("Paths outside the fixture tempdir are forbidden: %s", path[0])
+ }
+ } else {
+ p = append(p, f.Path())
+ }
+
+ p = append(p, path...)
+ return filepath.Join(p...)
+}
+
+func (f *TempDirFixture) JoinPaths(paths []string) []string {
+ joined := make([]string, len(paths))
+ for i, p := range paths {
+ joined[i] = f.JoinPath(p)
+ }
+ return joined
+}
+
+// Returns the full path to the file written.
+func (f *TempDirFixture) WriteFile(path string, contents string) string {
+ fullPath := f.JoinPath(path)
+ base := filepath.Dir(fullPath)
+ err := os.MkdirAll(base, os.FileMode(0o777))
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ err = os.WriteFile(fullPath, []byte(contents), os.FileMode(0o777))
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ return fullPath
+}
+
+// CopyFile copies the contents of the file at originalPath to newPath inside the fixture.
+func (f *TempDirFixture) CopyFile(originalPath, newPath string) {
+ contents, err := os.ReadFile(originalPath)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ f.WriteFile(newPath, string(contents))
+}
+
+// Read the file.
+func (f *TempDirFixture) ReadFile(path string) string {
+ fullPath := f.JoinPath(path)
+ contents, err := os.ReadFile(fullPath)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ return string(contents)
+}
+
+func (f *TempDirFixture) WriteSymlink(linkContents, destPath string) {
+ fullDestPath := f.JoinPath(destPath)
+ err := os.MkdirAll(filepath.Dir(fullDestPath), os.FileMode(0o777))
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ err = os.Symlink(linkContents, fullDestPath)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+}
+
+func (f *TempDirFixture) MkdirAll(path string) {
+ fullPath := f.JoinPath(path)
+ err := os.MkdirAll(fullPath, os.FileMode(0o777))
+ if err != nil {
+ f.t.Fatal(err)
+ }
+}
+
+func (f *TempDirFixture) TouchFiles(paths []string) {
+ for _, p := range paths {
+ f.WriteFile(p, "")
+ }
+}
+
+func (f *TempDirFixture) Rm(pathInRepo string) {
+ fullPath := f.JoinPath(pathInRepo)
+ err := os.RemoveAll(fullPath)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+}
+
+func (f *TempDirFixture) NewFile(prefix string) (*os.File, error) {
+ return os.CreateTemp(f.dir.Path(), prefix)
+}
+
+func (f *TempDirFixture) TempDir(prefix string) string {
+ name, err := os.MkdirTemp(f.dir.Path(), prefix)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ return name
+}
+
+func (f *TempDirFixture) tearDown() {
+ if f.oldDir != "" {
+ err := os.Chdir(f.oldDir)
+ if err != nil {
+ f.t.Fatal(err)
+ }
+ }
+
+ err := f.dir.TearDown()
+ if err != nil && runtime.GOOS == "windows" &&
+ (strings.Contains(err.Error(), "The process cannot access the file") ||
+ strings.Contains(err.Error(), "Access is denied")) {
+ // NOTE(nick): I'm not convinced that this is a real problem.
+ // I think it might just be clean up of file notification I/O.
+ } else if err != nil {
+ f.t.Fatal(err)
+ }
+}
diff --git a/pkg/watch/watcher_darwin.go b/pkg/watch/watcher_darwin.go
new file mode 100644
index 00000000000..01c1fb6c908
--- /dev/null
+++ b/pkg/watch/watcher_darwin.go
@@ -0,0 +1,138 @@
+//go:build darwin
+// +build darwin
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+	pathutil "github.com/docker/compose/v2/internal/paths"
+ "github.com/fsnotify/fsevents"
+)
+
+// A file watcher optimized for Darwin.
+// Uses FSEvents to avoid the terrible perf characteristics of kqueue. Requires CGO
+type fseventNotify struct {
+ stream *fsevents.EventStream
+ events chan FileEvent
+ errors chan error
+ stop chan struct{}
+
+ pathsWereWatching map[string]interface{}
+}
+
+func (d *fseventNotify) loop() {
+ for {
+ select {
+ case <-d.stop:
+ return
+ case events, ok := <-d.stream.Events:
+ if !ok {
+ return
+ }
+
+ for _, e := range events {
+ e.Path = filepath.Join(string(os.PathSeparator), e.Path)
+
+ _, isPathWereWatching := d.pathsWereWatching[e.Path]
+ if e.Flags&fsevents.ItemIsDir == fsevents.ItemIsDir && e.Flags&fsevents.ItemCreated == fsevents.ItemCreated && isPathWereWatching {
+ // This is the first create for the path that we're watching. We always get exactly one of these
+ // even after we get the HistoryDone event. Skip it.
+ continue
+ }
+
+ d.events <- NewFileEvent(e.Path)
+ }
+ }
+ }
+}
+
+// Add a path to be watched. Should only be called during initialization.
+func (d *fseventNotify) initAdd(name string) {
+ d.stream.Paths = append(d.stream.Paths, name)
+
+ if d.pathsWereWatching == nil {
+ d.pathsWereWatching = make(map[string]interface{})
+ }
+ d.pathsWereWatching[name] = struct{}{}
+}
+
+func (d *fseventNotify) Start() error {
+ if len(d.stream.Paths) == 0 {
+ return nil
+ }
+
+ numberOfWatches.Add(int64(len(d.stream.Paths)))
+
+ err := d.stream.Start()
+ if err != nil {
+ return err
+ }
+ go d.loop()
+ return nil
+}
+
+func (d *fseventNotify) Close() error {
+ numberOfWatches.Add(int64(-len(d.stream.Paths)))
+
+ d.stream.Stop()
+ close(d.errors)
+ close(d.stop)
+
+ return nil
+}
+
+func (d *fseventNotify) Events() chan FileEvent {
+ return d.events
+}
+
+func (d *fseventNotify) Errors() chan error {
+ return d.errors
+}
+
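+// newWatcher collapses the requested paths via pathutil.EncompassingPaths, resolves
+// each to an absolute path, and registers them all on a single FSEvents stream.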
+func newWatcher(paths []string) (Notify, error) {
+ dw := &fseventNotify{
+ stream: &fsevents.EventStream{
+ Latency: 50 * time.Millisecond,
+ Flags: fsevents.FileEvents | fsevents.IgnoreSelf,
+ // NOTE(dmiller): this corresponds to the `sinceWhen` parameter in FSEventStreamCreate
+ // https://developer.apple.com/documentation/coreservices/1443980-fseventstreamcreate
+ EventID: fsevents.LatestEventID(),
+ },
+ events: make(chan FileEvent),
+ errors: make(chan error),
+ stop: make(chan struct{}),
+ }
+
+ paths = pathutil.EncompassingPaths(paths)
+ for _, path := range paths {
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return nil, fmt.Errorf("newWatcher: %w", err)
+ }
+ dw.initAdd(path)
+ }
+
+ return dw, nil
+}
+
+var _ Notify = &fseventNotify{}
diff --git a/pkg/watch/watcher_naive.go b/pkg/watch/watcher_naive.go
new file mode 100644
index 00000000000..555bb5596b6
--- /dev/null
+++ b/pkg/watch/watcher_naive.go
@@ -0,0 +1,325 @@
+//go:build !darwin
+// +build !darwin
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+	pathutil "github.com/docker/compose/v2/internal/paths"
+ "github.com/sirupsen/logrus"
+ "github.com/tilt-dev/fsnotify"
+)
+
+// A naive file watcher that uses the plain fsnotify API.
+// Used on all non-Darwin systems (including Windows & Linux).
+//
+// All OS-specific codepaths are handled by fsnotify.
+type naiveNotify struct {
+ // Paths that we're watching that should be passed up to the caller.
+ // Note that we may have to watch ancestors of these paths
+ // in order to fulfill the API promise.
+ //
+ // We often need to check if paths are a child of a path in
+ // the notify list. It might be better to store this in a tree
+ // structure, so we can filter the list quickly.
+ notifyList map[string]bool
+
+ isWatcherRecursive bool
+ watcher *fsnotify.Watcher
+ events chan fsnotify.Event
+ wrappedEvents chan FileEvent
+ errors chan error
+ numWatches int64
+}
+
+func (d *naiveNotify) Start() error {
+ if len(d.notifyList) == 0 {
+ return nil
+ }
+
+ pathsToWatch := []string{}
+ for path := range d.notifyList {
+ pathsToWatch = append(pathsToWatch, path)
+ }
+
+ pathsToWatch, err := greatestExistingAncestors(pathsToWatch)
+ if err != nil {
+ return err
+ }
+ if d.isWatcherRecursive {
+ pathsToWatch = pathutil.EncompassingPaths(pathsToWatch)
+ }
+
+ for _, name := range pathsToWatch {
+ fi, err := os.Stat(name)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("notify.Add(%q): %w", name, err)
+ }
+
+ // if it's a file that doesn't exist,
+ // we should have caught that above, let's just skip it.
+ if os.IsNotExist(err) {
+ continue
+ }
+
+ if fi.IsDir() {
+ err = d.watchRecursively(name)
+ if err != nil {
+ return fmt.Errorf("notify.Add(%q): %w", name, err)
+ }
+ } else {
+ err = d.add(filepath.Dir(name))
+ if err != nil {
+ return fmt.Errorf("notify.Add(%q): %w", filepath.Dir(name), err)
+ }
+ }
+ }
+
+ go d.loop()
+
+ return nil
+}
+
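+// watchRecursively registers watches for dir: a single watch if the underlying
+// watcher is recursive, otherwise one watch per directory in the tree, skipping
+// subtrees that are irrelevant to the notify list.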
+func (d *naiveNotify) watchRecursively(dir string) error {
+ if d.isWatcherRecursive {
+ err := d.add(dir)
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("watcher.Add(%q): %w", dir, err)
+ }
+
+ return filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ if d.shouldSkipDir(path) {
+ logrus.Debugf("Ignoring directory and its contents (recursively): %s", path)
+ return filepath.SkipDir
+ }
+
+ err = d.add(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("watcher.Add(%q): %w", path, err)
+ }
+ return nil
+ })
+}
+
+func (d *naiveNotify) Close() error {
+ numberOfWatches.Add(-d.numWatches)
+ d.numWatches = 0
+ return d.watcher.Close()
+}
+
+func (d *naiveNotify) Events() chan FileEvent {
+ return d.wrappedEvents
+}
+
+func (d *naiveNotify) Errors() chan error {
+ return d.errors
+}
+
+func (d *naiveNotify) loop() { //nolint:gocyclo
+ defer close(d.wrappedEvents)
+ for e := range d.events {
+ // The Windows fsnotify event stream sometimes gets events with empty names
+ // that are also sent to the error stream. Hmmmm...
+ if e.Name == "" {
+ continue
+ }
+
+ if e.Op&fsnotify.Create != fsnotify.Create {
+ if d.shouldNotify(e.Name) {
+ d.wrappedEvents <- FileEvent(e.Name)
+ }
+ continue
+ }
+
+ if d.isWatcherRecursive {
+ if d.shouldNotify(e.Name) {
+ d.wrappedEvents <- FileEvent(e.Name)
+ }
+ continue
+ }
+
+ // If the watcher is not recursive, we have to walk the tree
+		// and add watches manually. We fire the event while we're walking the tree,
+		// because it's a bit more elegant that way.
+ //
+ // TODO(dbentley): if there's a delete should we call d.watcher.Remove to prevent leaking?
+ err := filepath.WalkDir(e.Name, func(path string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if d.shouldNotify(path) {
+ d.wrappedEvents <- FileEvent(path)
+ }
+
+ // TODO(dmiller): symlinks 😭
+
+ shouldWatch := false
+ if info.IsDir() {
+ // watch directories unless we can skip them entirely
+ if d.shouldSkipDir(path) {
+ return filepath.SkipDir
+ }
+
+ shouldWatch = true
+ } else {
+ // watch files that are explicitly named, but don't watch others
+ _, ok := d.notifyList[path]
+ if ok {
+ shouldWatch = true
+ }
+ }
+ if shouldWatch {
+ err := d.add(path)
+ if err != nil && !os.IsNotExist(err) {
+ logrus.Infof("Error watching path %s: %s", e.Name, err)
+ }
+ }
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ logrus.Infof("Error walking directory %s: %s", e.Name, err)
+ }
+ }
+}
+
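+// shouldNotify reports whether a change to path should be surfaced to the caller:
+// changes to an explicitly watched file are reported, changes to a watched directory
+// itself are ignored, and anything beneath a watched root is reported.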
+func (d *naiveNotify) shouldNotify(path string) bool {
+ if _, ok := d.notifyList[path]; ok {
+ // We generally don't care when directories change at the root of an ADD
+ stat, err := os.Lstat(path)
+ isDir := err == nil && stat.IsDir()
+ return !isDir
+ }
+
+ for root := range d.notifyList {
+ if pathutil.IsChild(root, path) {
+ return true
+ }
+ }
+ return false
+}
+
+func (d *naiveNotify) shouldSkipDir(path string) bool {
+ // If path is directly in the notifyList, we should always watch it.
+ if d.notifyList[path] {
+ return false
+ }
+
+ // Suppose we're watching
+ // /src/.tiltignore
+ // but the .tiltignore file doesn't exist.
+ //
+ // Our watcher will create an inotify watch on /src/.
+ //
+ // But then we want to make sure we don't recurse from /src/ down to /src/node_modules.
+ //
+ // To handle this case, we only want to traverse dirs that are:
+ // - A child of a directory that's in our notify list, or
+ // - A parent of a directory that's in our notify list
+ // (i.e., to cover the "path doesn't exist" case).
+ for root := range d.notifyList {
+ if pathutil.IsChild(root, path) || pathutil.IsChild(path, root) {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *naiveNotify) add(path string) error {
+ err := d.watcher.Add(path)
+ if err != nil {
+ return err
+ }
+ d.numWatches++
+ numberOfWatches.Add(1)
+ return nil
+}
+
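+// newWatcher probes whether the fsnotify build supports recursive watches (collapsing
+// the paths to their encompassing set if it does), records every requested path in
+// notifyList, and defers the actual watch registration to Start().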
+func newWatcher(paths []string) (Notify, error) {
+ fsw, err := fsnotify.NewWatcher()
+ if err != nil {
+ if strings.Contains(err.Error(), "too many open files") && runtime.GOOS == "linux" {
+ return nil, fmt.Errorf("hit OS limits creating a watcher.\n" +
+ "Run 'sysctl fs.inotify.max_user_instances' to check your inotify limits.\n" +
+ "To raise them, run 'sudo sysctl fs.inotify.max_user_instances=1024'")
+ }
+ return nil, fmt.Errorf("creating file watcher: %w", err)
+ }
+ MaybeIncreaseBufferSize(fsw)
+
+ err = fsw.SetRecursive()
+ isWatcherRecursive := err == nil
+
+ wrappedEvents := make(chan FileEvent)
+ notifyList := make(map[string]bool, len(paths))
+ if isWatcherRecursive {
+ paths = pathutil.EncompassingPaths(paths)
+ }
+ for _, path := range paths {
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return nil, fmt.Errorf("newWatcher: %w", err)
+ }
+ notifyList[path] = true
+ }
+
+ wmw := &naiveNotify{
+ notifyList: notifyList,
+ watcher: fsw,
+ events: fsw.Events,
+ wrappedEvents: wrappedEvents,
+ errors: fsw.Errors,
+ isWatcherRecursive: isWatcherRecursive,
+ }
+
+ return wmw, nil
+}
+
+var _ Notify = &naiveNotify{}
+
+func greatestExistingAncestors(paths []string) ([]string, error) {
+ result := []string{}
+ for _, p := range paths {
+ newP, err := greatestExistingAncestor(p)
+ if err != nil {
+ return nil, fmt.Errorf("finding ancestor of %s: %w", p, err)
+ }
+ result = append(result, newP)
+ }
+ return result, nil
+}
diff --git a/pkg/watch/watcher_naive_test.go b/pkg/watch/watcher_naive_test.go
new file mode 100644
index 00000000000..78acfdc5fee
--- /dev/null
+++ b/pkg/watch/watcher_naive_test.go
@@ -0,0 +1,159 @@
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestDontWatchEachFile(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("This test uses linux-specific inotify checks")
+ }
+
+ // fsnotify is not recursive, so we need to watch each directory
+ // you can watch individual files with fsnotify, but that is more prone to exhaust resources
+ // this test uses a Linux way to get the number of watches to make sure we're watching
+ // per-directory, not per-file
+ f := newNotifyFixture(t)
+
+ watched := f.TempDir("watched")
+
+ // there are a few different cases we want to test for because the code paths are slightly
+ // different:
+ // 1) initial: data there before we ever call watch
+ // 2) inplace: data we create while the watch is happening
+ // 3) staged: data we create in another directory and then atomically move into place
+
+ // initial
+ f.WriteFile(f.JoinPath(watched, "initial.txt"), "initial data")
+
+ initialDir := f.JoinPath(watched, "initial_dir")
+ if err := os.Mkdir(initialDir, 0o777); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 100; i++ {
+ f.WriteFile(f.JoinPath(initialDir, fmt.Sprintf("%d", i)), "initial data")
+ }
+
+ f.watch(watched)
+ f.fsync()
+ if len(f.events) != 0 {
+ t.Fatalf("expected 0 initial events; got %d events: %v", len(f.events), f.events)
+ }
+ f.events = nil
+
+ // inplace
+ inplace := f.JoinPath(watched, "inplace")
+ if err := os.Mkdir(inplace, 0o777); err != nil {
+ t.Fatal(err)
+ }
+ f.WriteFile(f.JoinPath(inplace, "inplace.txt"), "inplace data")
+
+ inplaceDir := f.JoinPath(inplace, "inplace_dir")
+ if err := os.Mkdir(inplaceDir, 0o777); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 100; i++ {
+ f.WriteFile(f.JoinPath(inplaceDir, fmt.Sprintf("%d", i)), "inplace data")
+ }
+
+ f.fsync()
+ if len(f.events) < 100 {
+ t.Fatalf("expected >100 inplace events; got %d events: %v", len(f.events), f.events)
+ }
+ f.events = nil
+
+ // staged
+ staged := f.TempDir("staged")
+ f.WriteFile(f.JoinPath(staged, "staged.txt"), "staged data")
+
+ stagedDir := f.JoinPath(staged, "staged_dir")
+ if err := os.Mkdir(stagedDir, 0o777); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 100; i++ {
+ f.WriteFile(f.JoinPath(stagedDir, fmt.Sprintf("%d", i)), "staged data")
+ }
+
+ if err := os.Rename(staged, f.JoinPath(watched, "staged")); err != nil {
+ t.Fatal(err)
+ }
+
+ f.fsync()
+ if len(f.events) < 100 {
+ t.Fatalf("expected >100 staged events; got %d events: %v", len(f.events), f.events)
+ }
+ f.events = nil
+
+ n, err := inotifyNodes()
+ require.NoError(t, err)
+ if n > 10 {
+ t.Fatalf("watching more than 10 files: %d", n)
+ }
+}
+
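+// inotifyNodes counts the inotify watches held by this process: find's %h expands to
+// /proc/<pid>/fd, so '%hinfo/%f' resolves each inotify descriptor to its
+// /proc/<pid>/fdinfo entry, whose "inotify" lines are then counted.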
+func inotifyNodes() (int, error) {
+ pid := os.Getpid()
+
+ output, err := exec.Command("/bin/sh", "-c", fmt.Sprintf(
+ "find /proc/%d/fd -lname anon_inode:inotify -printf '%%hinfo/%%f\n' | xargs cat | grep -c '^inotify'", pid)).Output()
+ if err != nil {
+ return 0, fmt.Errorf("error running command to determine number of watched files: %w\n %s", err, output)
+ }
+
+ n, err := strconv.Atoi(strings.TrimSpace(string(output)))
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse number of watched files: %w", err)
+ }
+ return n, nil
+}
+
+func TestDontRecurseWhenWatchingParentsOfNonExistentFiles(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("This test uses linux-specific inotify checks")
+ }
+
+ f := newNotifyFixture(t)
+
+ watched := f.TempDir("watched")
+ f.watch(filepath.Join(watched, ".tiltignore"))
+
+ excludedDir := f.JoinPath(watched, "excluded")
+ for i := 0; i < 10; i++ {
+ f.WriteFile(f.JoinPath(excludedDir, fmt.Sprintf("%d", i), "data.txt"), "initial data")
+ }
+ f.fsync()
+
+ n, err := inotifyNodes()
+ require.NoError(t, err)
+ if n > 5 {
+ t.Fatalf("watching more than 5 files: %d", n)
+ }
+}
diff --git a/pkg/progress/writer_test.go b/pkg/watch/watcher_nonwin.go
similarity index 74%
rename from pkg/progress/writer_test.go
rename to pkg/watch/watcher_nonwin.go
index 2933811b06d..36071331887 100644
--- a/pkg/progress/writer_test.go
+++ b/pkg/watch/watcher_nonwin.go
@@ -1,3 +1,6 @@
+//go:build !windows
+// +build !windows
+
/*
Copyright 2020 Docker Compose CLI authors
@@ -14,18 +17,10 @@
limitations under the License.
*/
-package progress
-
-import (
- "context"
- "testing"
-
- "gotest.tools/v3/assert"
-)
+package watch
-func TestNoopWriter(t *testing.T) {
- todo := context.TODO()
- writer := ContextWriter(todo)
+import "github.com/tilt-dev/fsnotify"
- assert.Equal(t, writer, &noopWriter{})
+func MaybeIncreaseBufferSize(w *fsnotify.Watcher) {
+ // Not needed on non-windows
}
diff --git a/pkg/watch/watcher_windows.go b/pkg/watch/watcher_windows.go
new file mode 100644
index 00000000000..69af1ea5d01
--- /dev/null
+++ b/pkg/watch/watcher_windows.go
@@ -0,0 +1,36 @@
+//go:build windows
+// +build windows
+
+/*
+ Copyright 2020 Docker Compose CLI authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package watch
+
+import (
+ "github.com/tilt-dev/fsnotify"
+)
+
+// TODO(nick): I think the ideal API would be to automatically increase the
+// size of the buffer when we exceed capacity. But this gets messy,
+// because each time we get a short read error, we need to invalidate
+// everything we know about the currently changed files. So for now,
+// we just provide a way for people to increase the buffer themselves.
+//
+// It might also pay to be clever about sizing the buffer
+// relative the number of files in the directory we're watching.
+func MaybeIncreaseBufferSize(w *fsnotify.Watcher) {
+ w.SetBufferSize(DesiredWindowsBufferSize())
+}
diff --git a/scripts/validate/check-go-mod b/scripts/validate/check-go-mod
deleted file mode 100755
index ad45b563b20..00000000000
--- a/scripts/validate/check-go-mod
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-
-# Copyright Docker Compose CLI authors
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-set -uo pipefail
-mkdir -p /tmp/gomod
-cp go.* /tmp/gomod/
-go mod tidy
-DIFF=$(diff go.mod /tmp/gomod/go.mod && diff go.sum /tmp/gomod/go.sum)
-if [ "$DIFF" ]; then
- echo
- echo "go.mod and go.sum are not up to date"
- echo
- echo "$DIFF"
- echo
- exit 1
-else
- echo "go.mod is correct"
-fi;