diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..45060a522 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +.github/** +.git/** +.git* diff --git a/.gitattributes b/.gitattributes index 14a112269..778f645e2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,2 @@ /*/**/Dockerfile linguist-generated /*/**/docker-entrypoint.sh linguist-generated -/Dockerfile*.template linguist-language=Dockerfile diff --git a/.github/actions/apply-docker-version/action.yml b/.github/actions/apply-docker-version/action.yml new file mode 100644 index 000000000..97dd05fb0 --- /dev/null +++ b/.github/actions/apply-docker-version/action.yml @@ -0,0 +1,36 @@ +inputs: + release_tag: + description: 'Release tag to build' + required: true + release_version_branch: + description: 'Release version branch to commit to' + required: true + +outputs: + changed_files: + description: 'List of files that were modified' + value: ${{ steps.apply-version.outputs.changed_files }} + +runs: + using: "composite" + steps: + - name: Checkout common functions + uses: actions/checkout@v4 + with: + repository: redis-developer/redis-oss-release-automation + ref: main + path: redis-oss-release-automation + + - name: Apply docker version + id: apply-version + shell: bash + run: | + ${{ github.action_path }}/apply-docker-version.sh ${{ inputs.release_tag }} + + - name: Create verified commit + if: steps.apply-version.outputs.changed_files != '' + uses: iarekylew00t/verified-bot-commit@v1 + with: + message: ${{ inputs.release_tag }} + files: ${{ steps.apply-version.outputs.changed_files }} + ref: ${{ inputs.release_version_branch }} diff --git a/.github/actions/apply-docker-version/apply-docker-version.sh b/.github/actions/apply-docker-version/apply-docker-version.sh new file mode 100755 index 000000000..0078b05cb --- /dev/null +++ b/.github/actions/apply-docker-version/apply-docker-version.sh @@ -0,0 +1,104 @@ +#!/bin/bash +set -e + +# This script updates Redis version in Dockerfiles using environment variables +# REDIS_ARCHIVE_URL and REDIS_ARCHIVE_SHA, then commits changes if any were made. + +# shellcheck disable=SC2034 +last_cmd_stdout="" +# shellcheck disable=SC2034 +last_cmd_stderr="" +# shellcheck disable=SC2034 +last_cmd_result=0 +# shellcheck disable=SC2034 +VERBOSITY=1 + + + +SCRIPT_DIR="$(dirname -- "$( readlink -f -- "$0"; )")" +# shellcheck disable=SC1091 +. "$SCRIPT_DIR/../common/func.sh" + +source_helper_file helpers.sh + +# Input TAG is expected in $1 +TAG="$1" + +if [ -z "$TAG" ]; then + echo "Error: TAG is required as first argument" + exit 1 +fi + +# Check if required environment variables are set +if [ -z "$REDIS_ARCHIVE_URL" ]; then + echo "Error: REDIS_ARCHIVE_URL environment variable is not set" + exit 1 +fi + +if [ -z "$REDIS_ARCHIVE_SHA" ]; then + echo "Error: REDIS_ARCHIVE_SHA environment variable is not set" + exit 1 +fi + +echo "TAG: $TAG" +echo "REDIS_ARCHIVE_URL: $REDIS_ARCHIVE_URL" +echo "REDIS_ARCHIVE_SHA: $REDIS_ARCHIVE_SHA" + +# Function to update Dockerfile +update_dockerfile() { + local dockerfile="$1" + local updated=false + + if [ ! -f "$dockerfile" ]; then + echo "Warning: $dockerfile not found, skipping" + return 1 + fi + + echo "Updating $dockerfile..." 
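+ # Note: the sed substitutions below use '|' as the delimiter so the slashes in $REDIS_ARCHIVE_URL need no escaping.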
+ + # Update REDIS_DOWNLOAD_URL + if grep -q "^ENV REDIS_DOWNLOAD_URL=" "$dockerfile"; then + sed -i "s|^ENV REDIS_DOWNLOAD_URL=.*|ENV REDIS_DOWNLOAD_URL=$REDIS_ARCHIVE_URL|" "$dockerfile" + else + echo "Cannot update $dockerfile, ENV REDIS_DOWNLOAD_URL not found" + return 1 + fi + + + # Update REDIS_DOWNLOAD_SHA + if grep -q "^ENV REDIS_DOWNLOAD_SHA=" "$dockerfile"; then + sed -i "s|^ENV REDIS_DOWNLOAD_SHA=.*|ENV REDIS_DOWNLOAD_SHA=$REDIS_ARCHIVE_SHA|" "$dockerfile" + else + echo "Cannot update $dockerfile, ENV REDIS_DOWNLOAD_SHA not found" + return 1 + fi +} + +docker_files=("debian/Dockerfile" "alpine/Dockerfile") +# Track which files were modified +changed_files=() + +for dockerfile in "${docker_files[@]}"; do + update_dockerfile "$dockerfile" +done + +changed_files=($(git diff --name-only "${docker_files[@]}")) + +# Output the list of changed files for GitHub Actions +if [ ${#changed_files[@]} -gt 0 ]; then + echo "Files were modified:" + printf '%s\n' "${changed_files[@]}" + + # Set GitHub Actions output + changed_files_output=$(printf '%s\n' "${changed_files[@]}") + { + echo "changed_files<<EOF" + echo "$changed_files_output" + echo "EOF" + } >> "$GITHUB_OUTPUT" + + echo "Changed files output set for next step" +else + echo "No files were modified" + echo "changed_files=" >> "$GITHUB_OUTPUT" +fi \ No newline at end of file diff --git a/.github/actions/build-and-tag-locally/action.yml b/.github/actions/build-and-tag-locally/action.yml new file mode 100644 index 000000000..34b0caa47 --- /dev/null +++ b/.github/actions/build-and-tag-locally/action.yml @@ -0,0 +1,270 @@ +name: Build and Test + +inputs: + distribution: + description: "Distribution flavor" + default: "debian" + platform: + description: "Platform" + required: true + publish_image: + description: "Publish image to Docker Hub" + default: "false" + registry_username: + description: "Docker Hub username" + required: false + registry_password: + description: "Docker Hub password" + required: false + registry_repository: + description: 'Repository to push the image to' + required: false + release_tag: + description: 'Release tag to build' + required: false + +runs: + using: "composite" + steps: + - name: Install QEMU + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y qemu-user-static + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Calculate architecture name + id: platform + shell: bash + run: | + case ${{ inputs.platform }} in + linux/amd64) + platform_name="amd64" + ;; + linux/arm64) + platform_name="arm64" + ;; + linux/arm/v5) + platform_name="arm-v5" + ;; + linux/arm/v6) + platform_name="arm-v6" + ;; + linux/arm/v7) + platform_name="arm-v7" + ;; + linux/i386) + platform_name="i386" + ;; + linux/mips64le) + platform_name="mips64le" + ;; + linux/ppc64le) + platform_name="ppc64le" + ;; + linux/riscv64) + platform_name="riscv64" + ;; + linux/s390x) + platform_name="s390x" + ;; + *) + echo "Architecture not supported: ${{ inputs.platform }}" + exit 1 + ;; + esac + echo "display_name=$platform_name" >> "$GITHUB_OUTPUT" + + - name: Clean up + shell: bash + run: | + docker rm -f sanity-test-${{ steps.platform.outputs.display_name }} || true + docker rmi -f ${{ github.sha }}:${{ steps.platform.outputs.display_name }} || true + + - name: Docker Login + uses: docker/login-action@v3 + if: inputs.publish_image == 'true' + with: + registry: ${{ inputs.registry_repository }} + username: ${{ inputs.registry_username }} + password: ${{ inputs.registry_password }} + + - name: Build + uses: docker/build-push-action@v6 + with: + context: ${{ 
inputs.distribution }} + push: false + load: true + platforms: ${{ inputs.platform }} + tags: ${{ github.sha }}:${{ steps.platform.outputs.display_name }} + cache-from: type=gha,scope=${{ inputs.distribution }}-${{ steps.platform.outputs.display_name }} + cache-to: type=gha,mode=max,scope=${{ inputs.distribution }}-${{ steps.platform.outputs.display_name }} + + - name: Save image + shell: bash + run: | + docker save -o /tmp/image-${{ steps.platform.outputs.display_name }}.tar ${{ github.sha }}:${{ steps.platform.outputs.display_name }} + + - name: Upload image + uses: actions/upload-artifact@v4 + with: + name: ${{ steps.platform.outputs.display_name }}-${{ inputs.distribution }}-docker-image.tar + path: /tmp/image-${{ steps.platform.outputs.display_name }}.tar + retention-days: 45 + + - name: Run container + shell: bash + if: ${{ contains(fromJSON('["amd64", "i386"]'), steps.platform.outputs.display_name) }} + run: | + docker run -d --name sanity-test-${{ steps.platform.outputs.display_name }} ${{ github.sha }}:${{ steps.platform.outputs.display_name }} + + - name: Container Logs + if: ${{ contains(fromJSON('["amd64", "i386"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker logs sanity-test-${{ steps.platform.outputs.display_name }} + + - name: Sanity Tests + if: ${{ contains(fromJSON('["amd64", "i386"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli ping + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli info server + + - name: Verify installed modules + if: ${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + modules=$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli module list) + echo "Installed modules:" + echo "$modules" + missing_modules=() + for module in "bf" "search" "timeseries" "ReJSON"; do + if ! 
echo "$modules" | grep -q "$module"; then + missing_modules+=("$module") + fi + done + if [ ${#missing_modules[@]} -eq 0 ]; then + echo "All required modules are installed" + else + echo "The following modules are missing: ${missing_modules[*]}" + exit 1 + fi + + - name: Test RedisBloom + if: ${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli BF.ADD popular_keys "redis:hash" + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli BF.ADD popular_keys "redis:set" + [ "$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli BF.EXISTS popular_keys "redis:hash")" = "1" ] || { echo "RedisBloom test failed: 'redis:hash' not found"; exit 1; } + [ "$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli BF.EXISTS popular_keys "redis:list")" = "0" ] || { echo "RedisBloom test failed: 'redis:list' found unexpectedly"; exit 1; } + echo "RedisBloom test passed successfully" + + - name: Test RediSearch + if: ${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli FT.CREATE redis_commands ON HASH PREFIX 1 cmd: SCHEMA name TEXT SORTABLE description TEXT + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli HSET cmd:set name "SET" description "Set the string value of a key" + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli HSET cmd:get name "GET" description "Get the value of a key" + result=$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli FT.SEARCH redis_commands "value") + if echo "$result" | grep -q "Set the string value of a key" && echo "$result" | grep -q "Get the value of a key"; then + echo "RediSearch test passed successfully" + else + echo "RediSearch test failed: expected commands not found in search results" + exit 1 + fi + + - name: Test RedisTimeSeries + if: ${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli TS.CREATE redis:cpu:usage RETENTION 86400 + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli TS.ADD redis:cpu:usage "*" 80 + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli TS.ADD redis:cpu:usage "*" 65 + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli TS.ADD redis:cpu:usage "*" 70 + result=$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli TS.RANGE redis:cpu:usage - + COUNT 3) + if echo "$result" | grep -q "80" && echo "$result" | grep -q "65" && echo "$result" | grep -q "70"; then + echo "RedisTimeSeries test passed successfully" + else + echo "RedisTimeSeries test failed: expected values not found in time series" + exit 1 + fi + + - name: Test ReJSON + if: ${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + shell: bash + run: | + docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli JSON.SET redis:config $ '{"maxmemory":"2gb","maxmemory-policy":"allkeys-lru"}' + result=$(docker exec sanity-test-${{ steps.platform.outputs.display_name }} redis-cli JSON.GET redis:config $.maxmemory-policy) + cleaned_result=$(echo $result | tr -d '[]"') + if [ "$cleaned_result" = "allkeys-lru" ]; then + echo "ReJSON test 
passed successfully" + else + echo "ReJSON test failed: expected 'allkeys-lru', got $result" + exit 1 + fi + + - name: Test the entrypoint + id: test_entrypoint + if: ${{ contains(fromJSON('["amd64", "i386"]'), steps.platform.outputs.display_name) }} + shell: bash + run: > + cd test && env + PLATFORM=${{ steps.platform.outputs.display_name }} + REDIS_IMG=${{ github.sha }}:${{ steps.platform.outputs.display_name }} + ./run-entrypoint-tests.sh + -- --output-junit-xml=report-entrypoint.xml + + - name: Test Report + uses: dorny/test-reporter@v2 + # run this step even if previous step failed, but not if it was skipped + if: ${{ !cancelled() && steps.test_entrypoint.conclusion != 'skipped' }} + with: + name: Entrypoint Tests + path: test/report-entrypoint.xml + reporter: java-junit + + - name: Format registry tag + id: format-registry-tag + shell: bash + run: | + printf "tag=%s:%s%s-%s-%s" \ + "${{ inputs.registry_repository }}" \ + "${{ inputs.release_tag != '' && format('{0}-', inputs.release_tag) || '' }}" \ + "${{ github.sha }}" \ + "${{ inputs.distribution }}" \ + "${{ steps.platform.outputs.display_name }}" \ + | tr '[:upper:]' '[:lower:]' >> "$GITHUB_OUTPUT" + + - name: Push image + uses: docker/build-push-action@v6 + if: ${{ inputs.publish_image == 'true' && contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + with: + context: ${{ inputs.distribution }} + push: true + tags: ${{ steps.format-registry-tag.outputs.tag }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Save image URL to artifact + shell: bash + run: | + if [[ "${{ inputs.publish_image }}" == "true" && "${{ contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }}" == "true" ]]; then + # Create a file with the image URL for this specific build + mkdir -p /tmp/image-urls + echo "${{ steps.format-registry-tag.outputs.tag }}" > "/tmp/image-urls/${{ inputs.distribution }}-${{ steps.platform.outputs.display_name }}.txt" + echo "Image URL saved: ${{ steps.format-registry-tag.outputs.tag }}" + else + echo "Image not published for this platform/distribution combination" + fi + + - name: Upload image URL artifact + uses: actions/upload-artifact@v4 + if: ${{ inputs.publish_image == 'true' && contains(fromJSON('["amd64"]'), steps.platform.outputs.display_name) }} + with: + name: image-url-${{ inputs.distribution }}-${{ steps.platform.outputs.display_name }} + path: /tmp/image-urls/${{ inputs.distribution }}-${{ steps.platform.outputs.display_name }}.txt + retention-days: 1 diff --git a/.github/actions/common/func.sh b/.github/actions/common/func.sh new file mode 100644 index 000000000..ae727dcea --- /dev/null +++ b/.github/actions/common/func.sh @@ -0,0 +1,173 @@ +#!/bin/bash + +# Sources a helper file from multiple possible locations (GITHUB_WORKSPACE, RELEASE_AUTOMATION_DIR, or relative path) +source_helper_file() { + local helper_file="$1" + local helper_errors="" + for dir in "GITHUB_WORKSPACE:$GITHUB_WORKSPACE/redis-oss-release-automation" "RELEASE_AUTOMATION_DIR:$RELEASE_AUTOMATION_DIR" ":../redis-oss-release-automation"; do + local var_name="${dir%%:*}" + local dir="${dir#*:}" + if [ -n "$var_name" ]; then + var_name="\$$var_name" + fi + local helper_path="$dir/.github/actions/common/$helper_file" + if [ -f "$helper_path" ]; then + helper_errors="" + # shellcheck disable=SC1090 + . 
"$helper_path" + break + else + helper_errors=$(printf "%s\n %s: %s" "$helper_errors" "$var_name" "$helper_path") + fi + done + if [ -n "$helper_errors" ]; then + echo "Error: $helper_file not found in any of the following locations: $helper_errors" >&2 + exit 1 + fi +} + +# Splits a Redis version string into major:minor:patch:suffix components +redis_version_split() { + local version + local numerics + # shellcheck disable=SC2001 + version=$(echo "$1" | sed 's/^v//') + + numerics=$(echo "$version" | grep -Po '^[1-9][0-9]*\.[0-9]+(\.[0-9]+|)' || :) + if [ -z "$numerics" ]; then + console_output 2 red "Cannot split version '$version', incorrect version format" + return 1 + fi + local major minor patch suffix + IFS=. read -r major minor patch < <(echo "$numerics") + suffix=${version:${#numerics}} + printf "%s:%s:%s:%s\n" "$major" "$minor" "$patch" "$suffix" +} + +slack_format_docker_image_urls_message() { + # Parse the image URLs from JSON array + jq --arg release_tag "$1" --arg footer "$2" ' + map( + capture("(?(?[^:]+:)(?[1-9][0-9]*[.][0-9]+[.][0-9]+(-[a-z0-9]+)*)-(?[a-f0-9]{40,})-(?[^-]+)-(?[^-]+))$") + ) + as $items + | { + icon_emoji: ":redis-circle:", + text: ("đŸŗ Docker Images Published for Redis: " + $release_tag), + blocks: [ + { + "type": "header", + "text": { "type": "plain_text", "text": ("đŸŗ Docker Images Published for Release " + $release_tag) } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ( + "The following Docker images have been published to Github Container Registry:\n\n" + + ( + $items + | map( + "Distribution: *" + .distro + "* " + + "Architecture: *" + .arch + "*" + + "\n```\n" + .url + "\n```" + ) + | join("\n\n") + ) + ) + } + }, + { + "type": "context", + "elements": [ + { "type": "mrkdwn", "text": $footer } + ] + } + ] + } + ' +} + +slack_format_docker_PR_message() { + release_tag=$1 + url=$2 + footer=$3 + +# Create Slack message payload + cat << EOF +{ +"icon_emoji": ":redis-circle:", +"text": "đŸŗ Docker Library PR created for Redis: $release_tag", +"blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "đŸŗ Docker Library PR created for Redis: $release_tag" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "$url" + } + }, + { + "type": "context", + "elements": [ + { + "type": "mrkdwn", + "text": "$footer" + } + ] + } +] +} +EOF +} + +slack_format_failure_message() { + header=$1 + workflow_url=$2 + footer=$3 + if [ -z "$header" ]; then + header=" " + fi + if [ -z "$footer" ]; then + footer=" " + fi + +# Create Slack message payload + cat << EOF +{ +"icon_emoji": ":redis-circle:", +"text": "$header", +"blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "❌ $header" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "Workflow run: $workflow_url" + } + }, + { + "type": "context", + "elements": [ + { + "type": "mrkdwn", + "text": "$footer" + } + ] + } +] +} +EOF +} \ No newline at end of file diff --git a/.github/workflows/build_release_automation.yml b/.github/workflows/build_release_automation.yml new file mode 100644 index 000000000..a9c0ff005 --- /dev/null +++ b/.github/workflows/build_release_automation.yml @@ -0,0 +1,110 @@ +name: Build Release Automation Docker Image + +on: + workflow_dispatch: + inputs: + image_tag: + description: 'Docker image tag (default: latest)' + required: false + default: 'latest' + push_to_ghcr: + description: 'Push image to GHCR' + required: false + default: true + type: boolean + +env: + REGISTRY: 
ghcr.io + IMAGE_NAME: ${{ github.repository }}/release-automation + +jobs: + build-test-and-push: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=${{ github.event.inputs.image_tag }} + type=raw,value=latest,enable={{is_default_branch}} + type=sha,prefix={{branch}}- + + - name: Build Docker image (without pushing) + uses: docker/build-push-action@v5 + with: + context: ./release-automation + file: ./release-automation/docker/Dockerfile + push: false + tags: test-image:latest + load: true + cache-from: type=gha + cache-to: type=gha,mode=max + + # Integration tests do need access to git repository + - name: Test the built image + run: | + # Start container and install dev dependencies for testing + docker run --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace \ + --entrypoint /bin/bash \ + test-image:latest \ + -c " + cd release-automation + set -e + echo '=== Installing test dependencies ===' + pip install pytest pytest-cov + echo '=== Running tests ===' + pytest -v tests/ + " + + - name: Log in to Container Registry + if: ${{ github.event.inputs.push_to_ghcr == 'true' }} + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Tag and push image + if: ${{ github.event.inputs.push_to_ghcr == 'true' }} + run: | + # Tag the tested image with the proper tags + echo '${{ steps.meta.outputs.tags }}' | while read -r tag; do + docker tag test-image:latest "$tag" + docker push "$tag" + done + + - name: Output image details + run: | + echo "## Docker Image Built Successfully! 
đŸŗ" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ **Tests passed**" >> $GITHUB_STEP_SUMMARY + echo "đŸ—ī¸ **Production image built** (without dev dependencies)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Image:** \`${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}\`" >> $GITHUB_STEP_SUMMARY + echo "**Tags:**" >> $GITHUB_STEP_SUMMARY + echo '${{ steps.meta.outputs.tags }}' | sed 's/^/- `/' | sed 's/$/`/' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + if [[ "${{ github.event.inputs.push_to_ghcr }}" == "true" ]]; then + echo "✅ **Image pushed to GHCR**" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "To pull the image:" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY + echo "docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.event.inputs.image_tag }}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY + else + echo "â„šī¸ **Image built locally only (not pushed)**" >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/pre-merge.yml b/.github/workflows/pre-merge.yml new file mode 100644 index 000000000..2f363aff5 --- /dev/null +++ b/.github/workflows/pre-merge.yml @@ -0,0 +1,135 @@ + +name: Build and Test +on: + pull_request: + branches: + - master + - release/* + workflow_call: + inputs: + release_tag: + description: 'Release tag to build' + required: true + type: string + publish_image: + description: 'Publish Docker image to GHCR' + required: false + type: boolean + default: false + outputs: + docker_image_urls: + description: 'Array of Docker image URLs that were published' + value: ${{ jobs.collect-image-urls.outputs.docker_image_urls }} + +jobs: + build-and-test: + runs-on: ${{ contains(matrix.platform, 'arm64') && 'ubuntu24-arm64-2-8' || 'ubuntu-latest' }} + strategy: + fail-fast: false + matrix: + distribution: + - debian + - alpine + platform: + - linux/amd64 + - linux/i386 + - linux/arm/v5 + - linux/arm/v6 + - linux/arm/v7 + - linux/mips64le + - linux/ppc64le + - linux/s390x + - linux/arm64 + - linux/riscv64 + exclude: + - distribution: alpine + platform: linux/mips64le + - distribution: alpine + platform: linux/arm/v5 + - distribution: debian + platform: linux/riscv64 + - distribution: debian + platform: linux/arm/v6 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Ensure release branch + if: ${{ inputs.release_tag }} + uses: redis-developer/redis-oss-release-automation/.github/actions/ensure-release-branch@main + with: + release_tag: ${{ inputs.release_tag }} + gh_token: ${{ secrets.GITHUB_TOKEN }} + - uses: ./.github/actions/build-and-tag-locally + with: + distribution: ${{ matrix.distribution }} + platform: ${{ matrix.platform }} + registry_username: ${{ github.actor }} + registry_password: ${{ secrets.GITHUB_TOKEN }} + publish_image: ${{ inputs.publish_image || vars.PUBLISH_IMAGE }} + registry_repository: ${{ format('ghcr.io/{0}', github.repository) }} + release_tag: ${{ inputs.release_tag }} + + collect-image-urls: + runs-on: ubuntu-latest + needs: build-and-test + if: ${{ inputs.release_tag }} + outputs: + docker_image_urls: ${{ steps.collect-urls.outputs.urls }} + steps: + - name: Download all image URL artifacts + uses: actions/download-artifact@v4 + with: + pattern: image-url-* + path: ./image-urls + merge-multiple: true + + - name: Collect image URLs from artifacts + id: collect-urls + run: | + if [ -d "./image-urls" ] && [ "$(ls -A ./image-urls 2>/dev/null)" ]; then + echo "Found image URL files:" + urls=$(find ./image-urls 
-name "*.txt" -exec cat {} \; | jq -R -s -c 'split("\n") | map(select(length > 0))') + echo "Collected image URLs: $urls" + else + echo "No image URL artifacts found" + urls="[]" + fi + + echo "urls=$urls" >> "$GITHUB_OUTPUT" + + notify-slack: + runs-on: ubuntu-latest + needs: collect-image-urls + if: ${{ inputs.release_tag && needs.collect-image-urls.outputs.docker_image_urls != '[]' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Send Slack notification + run: | + image_urls='${{ needs.collect-image-urls.outputs.docker_image_urls }}' + workflow_url="https://github.com/$%7B%7B%20github.repository%20%7D%7D/actions/runs/$%7B%7B%20github.run_id%20%7D%7D" + footer="Repository: ${{ github.repository }} | Commit: \`${{ github.sha }}\` | View: <$workflow_url|workflow run>" + + . ${GITHUB_WORKSPACE}/.github/actions/common/func.sh + + echo "$image_urls" | slack_format_docker_image_urls_message "${{ inputs.release_tag }}" "$footer" \ + | curl -s --fail-with-body -d@- "${{ secrets.SLACK_WEB_HOOK_URL }}" + + notify-slack-when-failed: + runs-on: ubuntu-latest + needs: collect-image-urls + if: ${{ inputs.release_tag && failure() }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Send Failure Slack notification + run: | + workflow_url="https://github.com/$%7B%7B%20github.repository%20%7D%7D/actions/runs/$%7B%7B%20github.run_id%20%7D%7D" + footer="Repository: ${{ github.repository }} | Commit: \`${{ github.sha }}\`" + + . ${GITHUB_WORKSPACE}/.github/actions/common/func.sh + + slack_format_failure_message "Docker Build failed for Redis: ${{ inputs.release_tag || 'unknown'}}" "$workflow_url" "$footer" \ + | curl -s --fail-with-body -d@- "${{ secrets.SLACK_WEB_HOOK_URL }}" \ No newline at end of file diff --git a/.github/workflows/release_build_and_test.yml b/.github/workflows/release_build_and_test.yml new file mode 100644 index 000000000..41299fb8d --- /dev/null +++ b/.github/workflows/release_build_and_test.yml @@ -0,0 +1,125 @@ +# This workflow is a part of release automation process. +# It is intended to be run with workflow_dispatch event by the automation. + +# Warning: Workflow does switch branches and this may lead to confusion when changing workflow actions. +# The usual safety rule is to make changes to workflow or actions in base branch (e.g, release/8.X) +# Version branches (e.g, 8.0.10-rc5-int8) will merge changes from base branch automatically. +on: + workflow_dispatch: + inputs: + release_tag: + description: 'Release tag to build' + required: true + workflow_uuid: + description: 'Optional UUID to identify this workflow run' + required: false + +# UUID is used to help automation to identify workflow run in the list of workflow runs. 
+run-name: "Release Build and Test${{ github.event.inputs.workflow_uuid && format(': {0}', github.event.inputs.workflow_uuid) || '' }}" + +jobs: + prepare-release: + runs-on: ["ubuntu-latest"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Validate Redis Release Archive + uses: redis-developer/redis-oss-release-automation/.github/actions/validate-redis-release-archive@main + with: + release_tag: ${{ github.event.inputs.release_tag }} + + - name: Ensure Release Branch + id: ensure-branch + uses: redis-developer/redis-oss-release-automation/.github/actions/ensure-release-branch@main + with: + release_tag: ${{ github.event.inputs.release_tag }} + allow_modify: true + gh_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Apply Docker Version + id: apply-version + uses: ./.github/actions/apply-docker-version + with: + release_tag: ${{ github.event.inputs.release_tag }} + release_version_branch: ${{ steps.ensure-branch.outputs.release_version_branch }} + + build-and-test: + uses: ./.github/workflows/pre-merge.yml + needs: prepare-release + secrets: inherit + with: + release_tag: ${{ github.event.inputs.release_tag }} + publish_image: true + + merge-back-to-release-branch: + needs: [prepare-release, build-and-test] + if: success() + runs-on: ["ubuntu-latest"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Ensure Release Branch + id: ensure-branch + uses: redis-developer/redis-oss-release-automation/.github/actions/ensure-release-branch@main + with: + release_tag: ${{ github.event.inputs.release_tag }} + allow_modify: false + gh_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Merge back to release branch + id: merge-back + uses: redis-developer/redis-oss-release-automation/.github/actions/merge-branches-verified@main + with: + from_branch: ${{ steps.ensure-branch.outputs.release_version_branch }} + to_branch: ${{ steps.ensure-branch.outputs.release_branch }} + gh_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create release handle JSON + shell: bash + run: | + if [ -n "${{ steps.merge-back.outputs.merge_commit_sha }}" ]; then + RELEASE_COMMIT_SHA="${{ steps.merge-back.outputs.merge_commit_sha }}" + elif [ -n "${{ steps.merge-back.outputs.target_before_merge_sha }}" ]; then + RELEASE_COMMIT_SHA="${{ steps.merge-back.outputs.target_before_merge_sha }}" + else + echo "Error: No commit SHA found, both merge_commit_sha and target_before_merge_sha are empty" >&2 + exit 1 + fi + + # Get docker image URLs from build-and-test job + DOCKER_IMAGE_URLS='${{ needs.build-and-test.outputs.docker_image_urls }}' + + # Validate that DOCKER_IMAGE_URLS is valid JSON + if ! echo "$DOCKER_IMAGE_URLS" | jq . 
> /dev/null 2>&1; then + echo "Warning: docker_image_urls is not valid JSON, using empty array" + DOCKER_IMAGE_URLS="[]" + fi + + cat > release_handle.json << EOF + { + "release_commit_sha": "$RELEASE_COMMIT_SHA", + "release_version": "${{ github.event.inputs.release_tag }}", + "release_version_branch": "${{ steps.ensure-branch.outputs.release_version_branch }}", + "release_branch": "${{ steps.ensure-branch.outputs.release_branch }}", + "docker_image_urls": $DOCKER_IMAGE_URLS + } + EOF + + echo "Created release_handle.json:" + cat release_handle.json + + - name: Upload release handle artifact + uses: actions/upload-artifact@v4 + with: + name: release_handle + path: release_handle.json + retention-days: 400 \ No newline at end of file diff --git a/.github/workflows/release_publish.yml b/.github/workflows/release_publish.yml new file mode 100644 index 000000000..c57fe9d24 --- /dev/null +++ b/.github/workflows/release_publish.yml @@ -0,0 +1,163 @@ +# This workflow publishes a release by creating a version tag. +# It is intended to be run with workflow_dispatch event by the automation. + +on: + workflow_dispatch: + inputs: + release_handle: + description: 'Release handle JSON string containing release information' + required: true + type: string + workflow_uuid: + description: 'Optional UUID to identify this workflow run' + required: false + pr_to_official_library: + default: false + +env: + TARGET_OFFICIAL_IMAGES_REPO: docker-library/official-images + #TARGET_OFFICIAL_IMAGES_REPO: Peter-Sh/official-images + FORKED_OFFICIAL_IMAGES_REPO: redis-developer/docker-library-official-images + PR_USER_MENTIONS: "@adamiBs @yossigo @adobrzhansky @maxb-io @dagansandler @Peter-Sh" + #PR_USER_MENTIONS: "" + +# UUID is used to help automation to identify workflow run in the list of workflow runs. +run-name: "Release Publish${{ github.event.inputs.workflow_uuid && format(': {0}', github.event.inputs.workflow_uuid) || '' }}" + +jobs: + publish-release: + runs-on: ["ubuntu-latest"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Parse release handle and validate + id: parse-release + shell: bash + run: | + # Parse the JSON input + RELEASE_HANDLE='${{ github.event.inputs.release_handle }}' + echo "Parsing release handle JSON:" + echo "$RELEASE_HANDLE" | jq . 
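+ # For reference, the handle matches the release_handle.json produced by release_build_and_test.yml:
+ # {"release_commit_sha": "<sha>", "release_version": "<tag>", "release_version_branch": "<branch>", "release_branch": "<branch>", "docker_image_urls": [...]}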
+ + # Extract release_commit_sha + RELEASE_COMMIT_SHA=$(echo "$RELEASE_HANDLE" | jq -r '.release_commit_sha // empty') + + # Validate that release_commit_sha exists and is not empty + if [ -z "$RELEASE_COMMIT_SHA" ] || [ "$RELEASE_COMMIT_SHA" = "null" ]; then + echo "Error: release_commit_sha is missing or empty in release_handle" + echo "Release handle content: $RELEASE_HANDLE" + exit 1 + fi + + # Extract release_version for tag creation + RELEASE_VERSION=$(echo "$RELEASE_HANDLE" | jq -r '.release_version // empty') + + if [ -z "$RELEASE_VERSION" ] || [ "$RELEASE_VERSION" = "null" ]; then + echo "Error: release_version is missing or empty in release_handle" + echo "Release handle content: $RELEASE_HANDLE" + exit 1 + fi + + echo "Successfully parsed release handle:" + echo " release_commit_sha: $RELEASE_COMMIT_SHA" + echo " release_version: $RELEASE_VERSION" + + # Set outputs for next steps + echo "release_commit_sha=$RELEASE_COMMIT_SHA" >> $GITHUB_OUTPUT + echo "release_version=$RELEASE_VERSION" >> $GITHUB_OUTPUT + + - name: Create version tag + uses: redis-developer/redis-oss-release-automation/.github/actions/create-tag-verified@main + with: + tag: v${{ steps.parse-release.outputs.release_version }} + ref: ${{ steps.parse-release.outputs.release_commit_sha }} + gh_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout official-images repo + uses: actions/checkout@v4 + with: + path: official-images + repository: ${{ github.event.inputs.pr_to_official_library == 'true' && env.TARGET_OFFICIAL_IMAGES_REPO || env.FORKED_OFFICIAL_IMAGES_REPO }} + + - name: Generate stackbrew library content + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Extract major version from release version (e.g., "8.2.1" -> "8") + MAJOR_VERSION=$(echo "${{ steps.parse-release.outputs.release_version }}" | cut -d. 
-f1) + echo "Major version: $MAJOR_VERSION" + + # Generate updated stackbrew content using the release automation Docker image + docker run --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace \ + -e FORCE_COLOR=1 \ + $(echo "ghcr.io/${{ github.repository }}/release-automation:latest" | tr '[:upper:]' '[:lower:]') \ + update-stackbrew-file $MAJOR_VERSION --input official-images/library/redis --output official-images/library/redis + cd official-images && git diff --color + cd - + + - name: Create pull request to official-images + id: create-pr + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ secrets.GH_TOKEN_FOR_PR }} + draft: true + push-to-fork: ${{ github.event.inputs.pr_to_official_library == 'true' && env.FORKED_OFFICIAL_IMAGES_REPO || '' }} + path: official-images + branch: redis-${{ steps.parse-release.outputs.release_version }} + commit-message: "Redis: Update to ${{ steps.parse-release.outputs.release_version }}" + title: "Redis: Update to ${{ steps.parse-release.outputs.release_version }}" + body: | + Automated update for Redis ${{ steps.parse-release.outputs.release_version }} + + Release commit: ${{ steps.parse-release.outputs.release_commit_sha }} + Release tag: v${{ steps.parse-release.outputs.release_version }} + Compare: ${{ github.server_url }}/${{ github.repository }}/compare/v${{ steps.parse-release.outputs.release_version }}^1...v${{ steps.parse-release.outputs.release_version }} + + ${{ env.PR_USER_MENTIONS }} + + - name: PR creation results + run: | + echo "Pull Request Number: ${{ steps.create-pr.outputs.pull-request-number }}" + echo "Pull Request URL: ${{ steps.create-pr.outputs.pull-request-url }}" + + # Create release_info.json artifact + cat > release_info.json << EOF + { + "pull_request_number": "${{ steps.create-pr.outputs.pull-request-number }}", + "pull_request_url": "${{ steps.create-pr.outputs.pull-request-url }}" + } + EOF + + echo "Created release_info.json:" + cat release_info.json + + - name: Upload release info artifact + uses: actions/upload-artifact@v4 + with: + name: release_info + path: release_info.json + retention-days: 400 + + - name: Send Slack notification + run: | + workflow_url="https://github.com/$%7B%7B%20github.repository%20%7D%7D/actions/runs/$%7B%7B%20github.run_id%20%7D%7D" + footer="Repository: ${{ github.repository }} | Commit: \`${{ github.sha }}\` | View: <$workflow_url|workflow run>" + + . ${GITHUB_WORKSPACE}/.github/actions/common/func.sh + + slack_format_docker_PR_message "${{ steps.parse-release.outputs.release_version }}" "${{ steps.create-pr.outputs.pull-request-url }}" "$footer" \ + | curl -s --fail-with-body -d@- "${{ secrets.SLACK_WEB_HOOK_URL }}" + + - name: Send Failure Slack notification + if: failure() + run: | + workflow_url="https://github.com/$%7B%7B%20github.repository%20%7D%7D/actions/runs/$%7B%7B%20github.run_id%20%7D%7D" + footer="Repository: ${{ github.repository }} | Commit: \`${{ github.sha }}\`" + + . 
${GITHUB_WORKSPACE}/.github/actions/common/func.sh + + slack_format_failure_message "Docker PR failed for Redis: ${{ steps.parse-release.outputs.release_version || 'unknown'}}" "$workflow_url" "$footer" \ + | curl -s --fail-with-body -d@- "${{ secrets.SLACK_WEB_HOOK_URL }}" diff --git a/.gitignore b/.gitignore deleted file mode 100644 index d548f66de..000000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.jq-template.awk diff --git a/README.md b/README.md index b3dea9433..27af22886 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # https://github.com/docker-library/redis -## Maintained by: [the Docker Community](https://github.com/docker-library/redis) +## Maintained by: [Redis LTD](https://redis.io/) -This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`redis`](https://hub.docker.com/_/redis/) (not to be confused with any official `redis` image provided by `redis` upstream). See [the Docker Hub page](https://hub.docker.com/_/redis/) for the full readme on how to use this Docker image and for information regarding contributing and issues. +This is the Git repo of the [Docker "Official Image"](https://github.com/docker-library/official-images#what-are-official-images) for [`redis`](https://hub.docker.com/_/redis/). See [the Docker Hub page](https://hub.docker.com/_/redis/) for the full `README` on how to use this Docker image and for information regarding contributing and issues. The [full image description on Docker Hub](https://hub.docker.com/_/redis/) is generated/maintained over in [the docker-library/docs repository](https://github.com/docker-library/docs), specifically in [the `redis` directory](https://github.com/docker-library/docs/tree/master/redis). @@ -22,5 +22,3 @@ For outstanding `redis` image PRs, check [PRs with the "library/redis" label on | [![amd64 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/amd64/job/redis.svg?label=amd64)](https://doi-janky.infosiftr.net/job/multiarch/job/amd64/job/redis/) | [![arm32v5 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/redis.svg?label=arm32v5)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v5/job/redis/) | [![arm32v6 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/redis.svg?label=arm32v6)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v6/job/redis/) | [![arm32v7 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/redis.svg?label=arm32v7)](https://doi-janky.infosiftr.net/job/multiarch/job/arm32v7/job/redis/) | | [![arm64v8 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/redis.svg?label=arm64v8)](https://doi-janky.infosiftr.net/job/multiarch/job/arm64v8/job/redis/) | [![i386 build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/i386/job/redis.svg?label=i386)](https://doi-janky.infosiftr.net/job/multiarch/job/i386/job/redis/) | [![mips64le build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/redis.svg?label=mips64le)](https://doi-janky.infosiftr.net/job/multiarch/job/mips64le/job/redis/) | [![ppc64le build status 
badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/redis.svg?label=ppc64le)](https://doi-janky.infosiftr.net/job/multiarch/job/ppc64le/job/redis/) | | [![s390x build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/multiarch/job/s390x/job/redis.svg?label=s390x)](https://doi-janky.infosiftr.net/job/multiarch/job/s390x/job/redis/) | [![put-shared build status badge](https://img.shields.io/jenkins/s/https/doi-janky.infosiftr.net/job/put-shared/job/light/job/redis.svg?label=put-shared)](https://doi-janky.infosiftr.net/job/put-shared/job/light/job/redis/) | - - diff --git a/alpine/Dockerfile b/alpine/Dockerfile new file mode 100644 index 000000000..f84cbe688 --- /dev/null +++ b/alpine/Dockerfile @@ -0,0 +1,149 @@ +FROM alpine:3.21 + +# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added +RUN set -eux; \ +# alpine already has a gid 999, so we'll use the next id + addgroup -S -g 1000 redis; \ + adduser -S -G redis -u 999 redis + +# runtime dependencies +RUN set -eux; \ + apk add --no-cache \ +# add tzdata for https://github.com/docker-library/redis/issues/138 + tzdata \ +# we need setpriv package as busybox provides very limited functionality + setpriv \ + ; +ENV REDIS_DOWNLOAD_URL=https://github.com/redis/redis/archive/refs/tags/8.0.4.tar.gz +ENV REDIS_DOWNLOAD_SHA=aadd6b0aac9ea0178b3c9a1a78469f2085752f743d563feba639d2e2c69c7ad1 +RUN set -eux; \ + \ + apk add --no-cache --virtual .build-deps \ + coreutils \ + dpkg-dev dpkg \ + gcc \ + linux-headers \ + make \ + musl-dev \ + openssl-dev \ + g++; \ + \ + arch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \ + case "$arch" in \ + 'amd64') export BUILD_WITH_MODULES=yes; export INSTALL_RUST_TOOLCHAIN=yes; export DISABLE_WERRORS=yes ;; \ + 'arm64') export BUILD_WITH_MODULES=yes; export INSTALL_RUST_TOOLCHAIN=yes; export DISABLE_WERRORS=yes ;; \ + *) echo >&2 "Modules are NOT supported! unsupported architecture: '$arch'"; export BUILD_WITH_MODULES=no ;; \ + esac; \ + if [ "$BUILD_WITH_MODULES" = "yes" ]; then \ + apk add --no-cache --virtual .module-build-deps \ + autoconf \ + automake \ + bash \ + bsd-compat-headers \ + build-base \ + cargo \ + clang \ + clang18-libclang \ + cmake \ + curl \ + g++ \ + git \ + libffi-dev \ + libgcc \ + libtool \ + openssh \ + openssl \ + py-virtualenv \ + py3-cryptography \ + py3-pip \ + py3-virtualenv \ + python3 \ + python3-dev \ + rsync \ + tar \ + unzip \ + which \ + xsimd \ + xz; \ + fi; \ + \ +# install required python packages for RedisJSON module + pip install -q --upgrade setuptools && pip install -q --upgrade pip && PIP_BREAK_SYSTEM_PACKAGES=1 pip install -q addict toml jinja2 ramp-packer ;\ + wget -O redis.tar.gz "$REDIS_DOWNLOAD_URL"; \ + echo "$REDIS_DOWNLOAD_SHA *redis.tar.gz" | sha256sum -c -; \ + mkdir -p /usr/src/redis; \ + tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1; \ + rm redis.tar.gz; \ + \ +# disable Redis protected mode [1] as it is unnecessary in context of Docker +# (ports are not automatically exposed when running inside Docker, but rather explicitly by specifying -p / -P) +# [1]: https://github.com/redis/redis/commit/edd4d555df57dc84265fdfb4ef59a4678832f6da + grep -E '^ *createBoolConfig[(]"protected-mode",.*, *1 *,.*[)],$' /usr/src/redis/src/config.c; \ + sed -ri 's!^( *createBoolConfig[(]"protected-mode",.*, *)1( *,.*[)],)$!\10\2!' 
/usr/src/redis/src/config.c; \ + grep -E '^ *createBoolConfig[(]"protected-mode",.*, *0 *,.*[)],$' /usr/src/redis/src/config.c; \ +# for future reference, we modify this directly in the source instead of just supplying a default configuration flag because apparently "if you specify any argument to redis-server, [it assumes] you are going to specify everything" +# see also https://github.com/docker-library/redis/issues/4#issuecomment-50780840 +# (more exactly, this makes sure the default behavior of "save on SIGTERM" stays functional by default) + \ +# https://github.com/jemalloc/jemalloc/issues/467 -- we need to patch the "./configure" for the bundled jemalloc to match how Debian compiles, for compatibility +# (also, we do cross-builds, so we need to embed the appropriate "--build=xxx" values to that "./configure" invocation) + gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \ + extraJemallocConfigureFlags="--build=$gnuArch"; \ +# https://salsa.debian.org/debian/jemalloc/-/blob/c0a88c37a551be7d12e4863435365c9a6a51525f/debian/rules#L8-23 + dpkgArch="$(dpkg --print-architecture)"; \ + case "${dpkgArch##*-}" in \ + amd64 | i386 | x32) extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-page=12" ;; \ + *) extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-page=16" ;; \ + esac; \ + extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-hugepage=21"; \ + grep -F 'cd jemalloc && ./configure ' /usr/src/redis/deps/Makefile; \ + sed -ri 's!cd jemalloc && ./configure !&'"$extraJemallocConfigureFlags"' !' /usr/src/redis/deps/Makefile; \ + grep -F "cd jemalloc && ./configure $extraJemallocConfigureFlags " /usr/src/redis/deps/Makefile; \ + \ + export BUILD_TLS=yes; \ + if [ "$BUILD_WITH_MODULES" = "yes" ]; then \ + make -C /usr/src/redis/modules/redisjson get_source; \ + sed -i 's/^RUST_FLAGS=$/RUST_FLAGS += -C target-feature=-crt-static/' /usr/src/redis/modules/redisjson/src/Makefile ; \ + grep -E 'RUST_FLAGS' /usr/src/redis/modules/redisjson/src/Makefile; \ + fi; \ + make -C /usr/src/redis -j "$(nproc)" all; \ + make -C /usr/src/redis install; \ + \ +# TODO https://github.com/redis/redis/pull/3494 (deduplicate "redis-server" copies) + serverMd5="$(md5sum /usr/local/bin/redis-server | cut -d' ' -f1)"; export serverMd5; \ + find /usr/local/bin/redis* -maxdepth 0 \ + -type f -not -name redis-server \ + -exec sh -eux -c ' \ + md5="$(md5sum "$1" | cut -d" " -f1)"; \ + test "$md5" = "$serverMd5"; \ + ' -- '{}' ';' \ + -exec ln -svfT 'redis-server' '{}' ';' \ + ; \ + \ + make -C /usr/src/redis distclean; \ + rm -r /usr/src/redis; \ + \ + runDeps="$( \ + scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \ + | tr ',' '\n' \ + | sort -u \ + | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ + )"; \ + apk add --no-network --virtual .redis-rundeps $runDeps; \ + if [ "$BUILD_WITH_MODULES" = "yes" ]; then \ + apk del --no-network .module-build-deps; \ + fi; \ + apk del --no-network .build-deps; \ + rm -rf ~/.cache ~/.gitconfig; \ + \ + redis-cli --version; \ + redis-server --version; +RUN mkdir /data && chown redis:redis /data +VOLUME /data +WORKDIR /data + +COPY docker-entrypoint.sh /usr/local/bin/ +ENTRYPOINT ["docker-entrypoint.sh"] + +EXPOSE 6379 +CMD ["redis-server"] diff --git a/alpine/docker-entrypoint.sh b/alpine/docker-entrypoint.sh new file mode 100755 index 000000000..476913d93 --- /dev/null +++ b/alpine/docker-entrypoint.sh @@ -0,0 +1,184 @@ +#!/bin/sh +set -e + +SETPRIV="/usr/bin/setpriv --reuid 
redis --regid redis --clear-groups" +IS_REDIS_SENTINEL="" +IS_REDIS_SERVER="" +CONFIG="" + +SKIP_FIX_PERMS_NOTICE="Use SKIP_FIX_PERMS=1 to skip permission changes." + +# functions +has_cap() { + /usr/bin/setpriv -d | grep -q 'Capability bounding set:.*\b'"$1"'\b' +} + +check_for_sentinel() { + CMD="$1" + shift + if [ "$CMD" = '/usr/local/bin/redis-server' ]; then + for arg in "$@"; do + if [ "$arg" = "--sentinel" ]; then + return 0 + fi + done + fi + + if [ "$CMD" = '/usr/local/bin/redis-sentinel' ]; then + return 0 + fi + + return 1 +} + +# Note: Change permissions only in simple, default cases to avoid affecting +# unexpected or user-specific files. + +fix_data_dir_perms() { + # Expecting only *.rdb files and default appendonlydir; skip if others are found. + unknown_file="$(find . -mindepth 1 -maxdepth 1 \ + -not \( -name \*.rdb -or \( -type d -and -name appendonlydir \) \) \ + -print -quit)" + if [ -z "$unknown_file" ]; then + find . -print0 | fix_perms_and_owner rw + else + echo "Notice: Unknown file '$unknown_file' found in data dir. Permissions will not be modified. $SKIP_FIX_PERMS_NOTICE" + fi +} + +fix_config_perms() { + config="$1" + mode="$2" + + if [ ! -f "$config" ]; then + return 0 + fi + + confdir="$(dirname "$config")" + if [ ! -d "$confdir" ]; then + return 0 + fi + + # Expecting only the config file; skip if others are found. + pattern=$(printf "%s" "$(basename "$config")" | sed 's/[][?*]/\\&/g') + unknown_file=$(find "$confdir" -mindepth 1 -maxdepth 1 -not -name "$pattern" -print -quit) + + if [ -z "$unknown_file" ]; then + printf '%s\0%s\0' "$confdir" "$config" | fix_perms_and_owner "$mode" + else + echo "Notice: Unknown file '$unknown_file' found in '$confdir'. Permissions will not be modified. $SKIP_FIX_PERMS_NOTICE" + + fi +} + +fix_perms_and_owner() { + mode="$1" + + # shellcheck disable=SC3045 + while IFS= read -r -d '' file; do + if [ "$mode" = "rw" ] && $SETPRIV test -r "$file" -a -w "$file"; then + continue + elif [ "$mode" = "r" ] && $SETPRIV test -r "$file"; then + continue + fi + new_mode=$mode + if [ -d "$file" ]; then + new_mode=${mode}x + fi + err=$(chown redis "$file" 2>&1) || echo "Warning: cannot change owner to 'redis' for '$file': $err. $SKIP_FIX_PERMS_NOTICE" + err=$(chmod "u+$new_mode" "$file" 2>&1) || echo "Warning: cannot change mode to 'u+$new_mode' for '$file': $err. 
$SKIP_FIX_PERMS_NOTICE" + done +} + +# first arg is `-f` or `--some-option` +# or first arg is `something.conf` +if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then + set -- redis-server "$@" +fi +CMD=$(command -v "$1" 2>/dev/null || :) + +if [ "$(readlink -f "$CMD")" = '/usr/local/bin/redis-server' ]; then + IS_REDIS_SERVER=1 +fi + +if check_for_sentinel "$CMD" "$@"; then + IS_REDIS_SENTINEL=1 +fi + +# if is server and its first arg is not an option then it's a config +if [ "$IS_REDIS_SERVER" ] && [ "${2#-}" = "$2" ]; then + CONFIG="$2" +fi + +# drop privileges only if +# we are starting either server or sentinel +# our uid is 0 (container started without explicit --user) +# and we have capabilities required to drop privs +if [ "$IS_REDIS_SERVER" ] && [ -z "$SKIP_DROP_PRIVS" ] && [ "$(id -u)" = '0' ] && has_cap setuid && has_cap setgid; then + if [ -z "$SKIP_FIX_PERMS" ]; then + # fix permissions + if [ "$IS_REDIS_SENTINEL" ]; then + fix_config_perms "$CONFIG" rw + else + fix_data_dir_perms + fix_config_perms "$CONFIG" r + fi + fi + + CAPS_TO_KEEP="" + if has_cap sys_resource; then + # we have sys_resource capability, keep it available for redis + # as redis may use it to increase open files limit + CAPS_TO_KEEP=",+sys_resource" + fi + exec $SETPRIV \ + --nnp \ + --inh-caps=-all$CAPS_TO_KEEP \ + --ambient-caps=-all$CAPS_TO_KEEP \ + --bounding-set=-all$CAPS_TO_KEEP \ + "$0" "$@" +fi + +# set an appropriate umask (if one isn't set already) +# - https://github.com/docker-library/redis/issues/305 +# - https://github.com/redis/redis/blob/bb875603fb7ff3f9d19aad906bd45d7db98d9a39/utils/systemd-redis_server.service#L37 +um="$(umask)" +if [ "$um" = '0022' ]; then + umask 0077 +fi + +if [ "$IS_REDIS_SERVER" ] && ! [ "$IS_REDIS_SENTINEL" ]; then + echo "Starting Redis Server" + modules_dir="/usr/local/lib/redis/modules/" + + if [ ! -d "$modules_dir" ]; then + echo "Warning: Default Redis modules directory $modules_dir does not exist." + elif [ -n "$(ls -A $modules_dir 2>/dev/null)" ]; then + for module in "$modules_dir"/*.so; + do + if [ ! -s "$module" ]; then + echo "Skipping module $module: file has no size." + continue + fi + + if [ -d "$module" ]; then + echo "Skipping module $module: is a directory." + continue + fi + + if [ ! -r "$module" ]; then + echo "Skipping module $module: file is not readable." + continue + fi + + if [ ! -x "$module" ]; then + echo "Warning: Module $module is not executable." 
+ continue + fi + + set -- "$@" --loadmodule "$module" + done + fi +fi + +exec "$@" diff --git a/debian/Dockerfile b/debian/Dockerfile new file mode 100644 index 000000000..5e6006dc6 --- /dev/null +++ b/debian/Dockerfile @@ -0,0 +1,130 @@ +FROM debian:bookworm-slim + +# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added +RUN set -eux; \ + groupadd -r -g 999 redis; \ + useradd -r -g redis -u 999 redis + +# runtime dependencies +RUN set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ +# add tzdata explicitly for https://github.com/docker-library/redis/issues/138 (see also https://bugs.debian.org/837060 and related) + tzdata \ + ; \ + rm -rf /var/lib/apt/lists/* + +ENV REDIS_DOWNLOAD_URL=https://github.com/redis/redis/archive/refs/tags/8.0.4.tar.gz +ENV REDIS_DOWNLOAD_SHA=aadd6b0aac9ea0178b3c9a1a78469f2085752f743d563feba639d2e2c69c7ad1 +RUN set -eux; \ + \ + savedAptMark="$(apt-mark showmanual)"; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + wget \ + dpkg-dev \ + gcc \ + g++ \ + libc6-dev \ + libssl-dev \ + make; \ + \ + arch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \ + case "$arch" in \ + 'amd64') export BUILD_WITH_MODULES=yes; export INSTALL_RUST_TOOLCHAIN=yes; export DISABLE_WERRORS=yes ;; \ + 'arm64') export BUILD_WITH_MODULES=yes; export INSTALL_RUST_TOOLCHAIN=yes; export DISABLE_WERRORS=yes ;; \ + *) echo >&2 "Modules are NOT supported! unsupported architecture: '$arch'"; export BUILD_WITH_MODULES=no ;; \ + esac; \ + if [ "$BUILD_WITH_MODULES" = "yes" ]; then \ + apt-get install -y --no-install-recommends \ + git \ + cmake \ + python3 \ + python3-pip \ + python3-venv \ + python3-dev \ + unzip \ + rsync \ + clang \ + automake \ + autoconf \ + libtool \ + g++; \ + fi; \ + \ + rm -rf /var/lib/apt/lists/*; \ + \ + wget -O redis.tar.gz "$REDIS_DOWNLOAD_URL"; \ + echo "$REDIS_DOWNLOAD_SHA *redis.tar.gz" | sha256sum -c -; \ + mkdir -p /usr/src/redis; \ + tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1; \ + rm redis.tar.gz; \ + \ +# disable Redis protected mode [1] as it is unnecessary in context of Docker +# (ports are not automatically exposed when running inside Docker, but rather explicitly by specifying -p / -P) +# [1]: https://github.com/redis/redis/commit/edd4d555df57dc84265fdfb4ef59a4678832f6da + grep -E '^ *createBoolConfig[(]"protected-mode",.*, *1 *,.*[)],$' /usr/src/redis/src/config.c; \ + sed -ri 's!^( *createBoolConfig[(]"protected-mode",.*, *)1( *,.*[)],)$!\10\2!' 
/usr/src/redis/src/config.c; \ + grep -E '^ *createBoolConfig[(]"protected-mode",.*, *0 *,.*[)],$' /usr/src/redis/src/config.c; \ +# for future reference, we modify this directly in the source instead of just supplying a default configuration flag because apparently "if you specify any argument to redis-server, [it assumes] you are going to specify everything" +# see also https://github.com/docker-library/redis/issues/4#issuecomment-50780840 +# (more exactly, this makes sure the default behavior of "save on SIGTERM" stays functional by default) + \ +# https://github.com/jemalloc/jemalloc/issues/467 -- we need to patch the "./configure" for the bundled jemalloc to match how Debian compiles, for compatibility +# (also, we do cross-builds, so we need to embed the appropriate "--build=xxx" values to that "./configure" invocation) + gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \ + extraJemallocConfigureFlags="--build=$gnuArch"; \ +# https://salsa.debian.org/debian/jemalloc/-/blob/c0a88c37a551be7d12e4863435365c9a6a51525f/debian/rules#L8-23 + case "${arch##*-}" in \ + amd64 | i386 | x32) extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-page=12" ;; \ + *) extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-page=16" ;; \ + esac; \ + extraJemallocConfigureFlags="$extraJemallocConfigureFlags --with-lg-hugepage=21"; \ + grep -F 'cd jemalloc && ./configure ' /usr/src/redis/deps/Makefile; \ + sed -ri 's!cd jemalloc && ./configure !&'"$extraJemallocConfigureFlags"' !' /usr/src/redis/deps/Makefile; \ + grep -F "cd jemalloc && ./configure $extraJemallocConfigureFlags " /usr/src/redis/deps/Makefile; \ + \ + export BUILD_TLS=yes; \ + make -C /usr/src/redis -j "$(nproc)" all; \ + make -C /usr/src/redis install; \ + \ +# TODO https://github.com/redis/redis/pull/3494 (deduplicate "redis-server" copies) + serverMd5="$(md5sum /usr/local/bin/redis-server | cut -d' ' -f1)"; export serverMd5; \ + find /usr/local/bin/redis* -maxdepth 0 \ + -type f -not -name redis-server \ + -exec sh -eux -c ' \ + md5="$(md5sum "$1" | cut -d" " -f1)"; \ + test "$md5" = "$serverMd5"; \ + ' -- '{}' ';' \ + -exec ln -svfT 'redis-server' '{}' ';' \ + ; \ + \ + make -C /usr/src/redis distclean; \ + rm -r /usr/src/redis; \ + \ + apt-mark auto '.*' > /dev/null; \ + [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark > /dev/null; \ + find /usr/local -type f -executable -exec ldd '{}' ';' \ + | awk '/=>/ { so = $(NF-1); if (index(so, "/usr/local/") == 1) { next }; gsub("^/(usr/)?", "", so); printf "*%s\n", so }' \ + | sort -u \ + | xargs -r dpkg-query --search \ + | cut -d: -f1 \ + | sort -u \ + | xargs -r apt-mark manual \ + ; \ + apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + rm -rf /var/cache/debconf/*; \ + \ + redis-cli --version; \ + redis-server --version + +RUN mkdir /data && chown redis:redis /data +VOLUME /data +WORKDIR /data + +COPY docker-entrypoint.sh /usr/local/bin/ +ENTRYPOINT ["docker-entrypoint.sh"] + +EXPOSE 6379 +CMD ["redis-server"] diff --git a/debian/docker-entrypoint.sh b/debian/docker-entrypoint.sh new file mode 100755 index 000000000..d0a21fe4d --- /dev/null +++ b/debian/docker-entrypoint.sh @@ -0,0 +1,184 @@ +#!/bin/bash +set -e + +SETPRIV="/usr/bin/setpriv --reuid redis --regid redis --clear-groups" +IS_REDIS_SENTINEL="" +IS_REDIS_SERVER="" +CONFIG="" + +SKIP_FIX_PERMS_NOTICE="Use SKIP_FIX_PERMS=1 to skip permission changes." 
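+
+# Both opt-outs used below are plain environment variables; a minimal usage sketch (image name assumed):
+#   docker run -e SKIP_FIX_PERMS=1 -e SKIP_DROP_PRIVS=1 redis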
+ +# functions +has_cap() { + /usr/bin/setpriv -d | grep -q 'Capability bounding set:.*\b'"$1"'\b' +} + +check_for_sentinel() { + CMD="$1" + shift + if [ "$CMD" = '/usr/local/bin/redis-server' ]; then + for arg in "$@"; do + if [ "$arg" = "--sentinel" ]; then + return 0 + fi + done + fi + + if [ "$CMD" = '/usr/local/bin/redis-sentinel' ]; then + return 0 + fi + + return 1 +} + +# Note: Change permissions only in simple, default cases to avoid affecting +# unexpected or user-specific files. + +fix_data_dir_perms() { + # Expecting only *.rdb files and default appendonlydir; skip if others are found. + unknown_file="$(find . -mindepth 1 -maxdepth 1 \ + -not \( -name \*.rdb -or \( -type d -and -name appendonlydir \) \) \ + -print -quit)" + if [ -z "$unknown_file" ]; then + find . -print0 | fix_perms_and_owner rw + else + echo "Notice: Unknown file '$unknown_file' found in data dir. Permissions will not be modified. $SKIP_FIX_PERMS_NOTICE" + fi +} + +fix_config_perms() { + config="$1" + mode="$2" + + if [ ! -f "$config" ]; then + return 0 + fi + + confdir="$(dirname "$config")" + if [ ! -d "$confdir" ]; then + return 0 + fi + + # Expecting only the config file; skip if others are found. + pattern=$(printf "%s" "$(basename "$config")" | sed 's/[][?*]/\\&/g') + unknown_file=$(find "$confdir" -mindepth 1 -maxdepth 1 -not -name "$pattern" -print -quit) + + if [ -z "$unknown_file" ]; then + printf '%s\0%s\0' "$confdir" "$config" | fix_perms_and_owner "$mode" + else + echo "Notice: Unknown file '$unknown_file' found in '$confdir'. Permissions will not be modified. $SKIP_FIX_PERMS_NOTICE" + + fi +} + +fix_perms_and_owner() { + mode="$1" + + # shellcheck disable=SC3045 + while IFS= read -r -d '' file; do + if [ "$mode" = "rw" ] && $SETPRIV test -r "$file" -a -w "$file"; then + continue + elif [ "$mode" = "r" ] && $SETPRIV test -r "$file"; then + continue + fi + new_mode=$mode + if [ -d "$file" ]; then + new_mode=${mode}x + fi + err=$(chown redis "$file" 2>&1) || echo "Warning: cannot change owner to 'redis' for '$file': $err. $SKIP_FIX_PERMS_NOTICE" + err=$(chmod "u+$new_mode" "$file" 2>&1) || echo "Warning: cannot change mode to 'u+$new_mode' for '$file': $err. 
$SKIP_FIX_PERMS_NOTICE" + done +} + +# first arg is `-f` or `--some-option` +# or first arg is `something.conf` +if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then + set -- redis-server "$@" +fi +CMD=$(command -v "$1" 2>/dev/null || :) + +if [ "$(readlink -f "$CMD")" = '/usr/local/bin/redis-server' ]; then + IS_REDIS_SERVER=1 +fi + +if check_for_sentinel "$CMD" "$@"; then + IS_REDIS_SENTINEL=1 +fi + +# if is server and its first arg is not an option then it's a config +if [ "$IS_REDIS_SERVER" ] && [ "${2#-}" = "$2" ]; then + CONFIG="$2" +fi + +# drop privileges only if +# we are starting either server or sentinel +# our uid is 0 (container started without explicit --user) +# and we have capabilities required to drop privs +if [ "$IS_REDIS_SERVER" ] && [ -z "$SKIP_DROP_PRIVS" ] && [ "$(id -u)" = '0' ] && has_cap setuid && has_cap setgid; then + if [ -z "$SKIP_FIX_PERMS" ]; then + # fix permissions + if [ "$IS_REDIS_SENTINEL" ]; then + fix_config_perms "$CONFIG" rw + else + fix_data_dir_perms + fix_config_perms "$CONFIG" r + fi + fi + + CAPS_TO_KEEP="" + if has_cap sys_resource; then + # we have sys_resource capability, keep it available for redis + # as redis may use it to increase open files limit + CAPS_TO_KEEP=",+sys_resource" + fi + exec $SETPRIV \ + --nnp \ + --inh-caps=-all$CAPS_TO_KEEP \ + --ambient-caps=-all$CAPS_TO_KEEP \ + --bounding-set=-all$CAPS_TO_KEEP \ + "$0" "$@" +fi + +# set an appropriate umask (if one isn't set already) +# - https://github.com/docker-library/redis/issues/305 +# - https://github.com/redis/redis/blob/bb875603fb7ff3f9d19aad906bd45d7db98d9a39/utils/systemd-redis_server.service#L37 +um="$(umask)" +if [ "$um" = '0022' ]; then + umask 0077 +fi + +if [ "$IS_REDIS_SERVER" ] && ! [ "$IS_REDIS_SENTINEL" ]; then + echo "Starting Redis Server" + modules_dir="/usr/local/lib/redis/modules/" + + if [ ! -d "$modules_dir" ]; then + echo "Warning: Default Redis modules directory $modules_dir does not exist." + elif [ -n "$(ls -A $modules_dir 2>/dev/null)" ]; then + for module in "$modules_dir"/*.so; + do + if [ ! -s "$module" ]; then + echo "Skipping module $module: file has no size." + continue + fi + + if [ -d "$module" ]; then + echo "Skipping module $module: is a directory." + continue + fi + + if [ ! -r "$module" ]; then + echo "Skipping module $module: file is not readable." + continue + fi + + if [ ! -x "$module" ]; then + echo "Warning: Module $module is not executable." 
+ continue + fi + + set -- "$@" --loadmodule "$module" + done + fi +fi + +exec "$@" diff --git a/.github/workflows/ci.yml b/legacy-templating-scripts/.github/workflows/ci.yml similarity index 100% rename from .github/workflows/ci.yml rename to legacy-templating-scripts/.github/workflows/ci.yml diff --git a/.github/workflows/verify-templating.yml b/legacy-templating-scripts/.github/workflows/verify-templating.yml similarity index 100% rename from .github/workflows/verify-templating.yml rename to legacy-templating-scripts/.github/workflows/verify-templating.yml diff --git a/6.2/alpine/Dockerfile b/legacy-templating-scripts/6.2/alpine/Dockerfile similarity index 100% rename from 6.2/alpine/Dockerfile rename to legacy-templating-scripts/6.2/alpine/Dockerfile diff --git a/6.2/alpine/docker-entrypoint.sh b/legacy-templating-scripts/6.2/alpine/docker-entrypoint.sh similarity index 100% rename from 6.2/alpine/docker-entrypoint.sh rename to legacy-templating-scripts/6.2/alpine/docker-entrypoint.sh diff --git a/6.2/debian/Dockerfile b/legacy-templating-scripts/6.2/debian/Dockerfile similarity index 100% rename from 6.2/debian/Dockerfile rename to legacy-templating-scripts/6.2/debian/Dockerfile diff --git a/6.2/debian/docker-entrypoint.sh b/legacy-templating-scripts/6.2/debian/docker-entrypoint.sh similarity index 100% rename from 6.2/debian/docker-entrypoint.sh rename to legacy-templating-scripts/6.2/debian/docker-entrypoint.sh diff --git a/7.0/alpine/Dockerfile b/legacy-templating-scripts/7.0/alpine/Dockerfile similarity index 100% rename from 7.0/alpine/Dockerfile rename to legacy-templating-scripts/7.0/alpine/Dockerfile diff --git a/7.0/alpine/docker-entrypoint.sh b/legacy-templating-scripts/7.0/alpine/docker-entrypoint.sh similarity index 100% rename from 7.0/alpine/docker-entrypoint.sh rename to legacy-templating-scripts/7.0/alpine/docker-entrypoint.sh diff --git a/7.0/debian/Dockerfile b/legacy-templating-scripts/7.0/debian/Dockerfile similarity index 100% rename from 7.0/debian/Dockerfile rename to legacy-templating-scripts/7.0/debian/Dockerfile diff --git a/7.0/debian/docker-entrypoint.sh b/legacy-templating-scripts/7.0/debian/docker-entrypoint.sh similarity index 100% rename from 7.0/debian/docker-entrypoint.sh rename to legacy-templating-scripts/7.0/debian/docker-entrypoint.sh diff --git a/7.2/alpine/Dockerfile b/legacy-templating-scripts/7.2/alpine/Dockerfile similarity index 100% rename from 7.2/alpine/Dockerfile rename to legacy-templating-scripts/7.2/alpine/Dockerfile diff --git a/7.2/alpine/docker-entrypoint.sh b/legacy-templating-scripts/7.2/alpine/docker-entrypoint.sh similarity index 100% rename from 7.2/alpine/docker-entrypoint.sh rename to legacy-templating-scripts/7.2/alpine/docker-entrypoint.sh diff --git a/7.2/debian/Dockerfile b/legacy-templating-scripts/7.2/debian/Dockerfile similarity index 100% rename from 7.2/debian/Dockerfile rename to legacy-templating-scripts/7.2/debian/Dockerfile diff --git a/7.2/debian/docker-entrypoint.sh b/legacy-templating-scripts/7.2/debian/docker-entrypoint.sh similarity index 100% rename from 7.2/debian/docker-entrypoint.sh rename to legacy-templating-scripts/7.2/debian/docker-entrypoint.sh diff --git a/7.4-rc/alpine/Dockerfile b/legacy-templating-scripts/7.4-rc/alpine/Dockerfile similarity index 100% rename from 7.4-rc/alpine/Dockerfile rename to legacy-templating-scripts/7.4-rc/alpine/Dockerfile diff --git a/7.4-rc/alpine/docker-entrypoint.sh b/legacy-templating-scripts/7.4-rc/alpine/docker-entrypoint.sh similarity index 100% rename from 
7.4-rc/alpine/docker-entrypoint.sh rename to legacy-templating-scripts/7.4-rc/alpine/docker-entrypoint.sh diff --git a/7.4-rc/debian/Dockerfile b/legacy-templating-scripts/7.4-rc/debian/Dockerfile similarity index 100% rename from 7.4-rc/debian/Dockerfile rename to legacy-templating-scripts/7.4-rc/debian/Dockerfile diff --git a/7.4-rc/debian/docker-entrypoint.sh b/legacy-templating-scripts/7.4-rc/debian/docker-entrypoint.sh similarity index 100% rename from 7.4-rc/debian/docker-entrypoint.sh rename to legacy-templating-scripts/7.4-rc/debian/docker-entrypoint.sh diff --git a/7.4/alpine/Dockerfile b/legacy-templating-scripts/7.4/alpine/Dockerfile similarity index 100% rename from 7.4/alpine/Dockerfile rename to legacy-templating-scripts/7.4/alpine/Dockerfile diff --git a/7.4/alpine/docker-entrypoint.sh b/legacy-templating-scripts/7.4/alpine/docker-entrypoint.sh similarity index 100% rename from 7.4/alpine/docker-entrypoint.sh rename to legacy-templating-scripts/7.4/alpine/docker-entrypoint.sh diff --git a/7.4/debian/Dockerfile b/legacy-templating-scripts/7.4/debian/Dockerfile similarity index 100% rename from 7.4/debian/Dockerfile rename to legacy-templating-scripts/7.4/debian/Dockerfile diff --git a/7.4/debian/docker-entrypoint.sh b/legacy-templating-scripts/7.4/debian/docker-entrypoint.sh similarity index 100% rename from 7.4/debian/docker-entrypoint.sh rename to legacy-templating-scripts/7.4/debian/docker-entrypoint.sh diff --git a/BUILD.md b/legacy-templating-scripts/BUILD.md similarity index 100% rename from BUILD.md rename to legacy-templating-scripts/BUILD.md diff --git a/Dockerfile.template b/legacy-templating-scripts/Dockerfile.template similarity index 100% rename from Dockerfile.template rename to legacy-templating-scripts/Dockerfile.template diff --git a/apply-templates.sh b/legacy-templating-scripts/apply-templates.sh similarity index 100% rename from apply-templates.sh rename to legacy-templating-scripts/apply-templates.sh diff --git a/docker-entrypoint.sh b/legacy-templating-scripts/docker-entrypoint.sh similarity index 100% rename from docker-entrypoint.sh rename to legacy-templating-scripts/docker-entrypoint.sh diff --git a/generate-stackbrew-library-mac.sh b/legacy-templating-scripts/generate-stackbrew-library-mac.sh similarity index 100% rename from generate-stackbrew-library-mac.sh rename to legacy-templating-scripts/generate-stackbrew-library-mac.sh diff --git a/generate-stackbrew-library.sh b/legacy-templating-scripts/generate-stackbrew-library.sh similarity index 100% rename from generate-stackbrew-library.sh rename to legacy-templating-scripts/generate-stackbrew-library.sh diff --git a/stackbrew-generated/7-4.txt b/legacy-templating-scripts/stackbrew-generated/7-4.txt similarity index 100% rename from stackbrew-generated/7-4.txt rename to legacy-templating-scripts/stackbrew-generated/7-4.txt diff --git a/update.sh b/legacy-templating-scripts/update.sh similarity index 100% rename from update.sh rename to legacy-templating-scripts/update.sh diff --git a/versions.json b/legacy-templating-scripts/versions.json similarity index 100% rename from versions.json rename to legacy-templating-scripts/versions.json diff --git a/versions.sh b/legacy-templating-scripts/versions.sh similarity index 100% rename from versions.sh rename to legacy-templating-scripts/versions.sh diff --git a/release-automation/.gitignore b/release-automation/.gitignore new file mode 100644 index 000000000..932765aeb --- /dev/null +++ b/release-automation/.gitignore @@ -0,0 +1,2 @@ +__pycache__/ +venv 
diff --git a/release-automation/README.md b/release-automation/README.md
new file mode 100644
index 000000000..3e07a6d1e
--- /dev/null
+++ b/release-automation/README.md
@@ -0,0 +1,88 @@
+# Docker release process and release automation description
+
+This readme covers the release process for versions 8 and above.
+
+In version 8 the docker-library structure has changed. Static Dockerfiles are used instead of templating. Versions live in different mainline branches and are marked with tags.
+
+The goal of the docker release process is to create a PR in the official Docker library for the library/redis file.
+
+The library/redis stackbrew file should reflect the tags in the redis/docker-library-redis repository.
+
+## Branches and tags
+
+Mainline branches are named `release/Major.Minor` (e.g. `release/8.2`).
+
+Each version release is tagged with `vMajor.Minor.Patch` (e.g. `v8.2.1`).
+
+Milestone releases are tagged with `vMajor.Minor.Patch-Milestone` (e.g. `v8.2.1-m01`). Any suffix after the patch version is considered a milestone.
+
+Suffixes starting with `rc` are considered release candidates and are preferred over suffixes starting with `m`, which in turn are preferred over any other suffix.
+
+Tags without a suffix are considered GA (General Availability) releases (e.g. `v8.2.1`).
+
+Internal releases are milestone releases containing `-int` in their name (e.g. `v8.2.1-m01-int1` or `8.4.0-int3`). They are not released to the public.
+
+Milestone releases never get `latest` or any other default tags, like `8`, `8.2`, `8.2.1`, `latest`, `bookworm`, etc.
+
+For each mainline, only one GA release and optionally any number of milestone releases with versions higher than this GA may be published in the official-library.
+
+Each patch version may have only one GA or milestone release; the GA release is preferred over a milestone release.
+
+For example, for this list of tags the following rules are applied:
+
+* `v8.2.3-m01` - included because there is neither a GA nor any higher milestone version for 8.2.3
+* `v8.2.2-rc2` - included because it is the highest version among the 8.2.2 tags
+* `v8.2.2-rc1` - excluded because 8.2.2-rc2 is a higher version
+* `v8.2.2-m01` - excluded because 8.2.2-rc2 is a higher version
+* `v8.2.1-rc2` - excluded because the 8.2.1 GA version exists
+* `v8.2.1` - included because it is the highest GA for 8.2
+* `v8.2.0` - excluded because 8.2.1 is a higher version
+
+End-of-life versions are marked with the `-eol` suffix (e.g. `v8.0.3-eol`). When at least one version in a minor series is tagged with `-eol`, all versions in that minor series are considered EOL and are not included in the release file.
+
+## Creating a release manually
+
+This process is automated using GitHub workflows. However, it's useful to understand the manual process.
+
+Determine the mainline branch, e.g. `release/8.2` for version `8.2.2`.
+
+Optionally create a release branch from the mainline branch, e.g. `8.2.2`.
+
+Modify the Dockerfiles.
+
+Test the Dockerfiles.
+
+If a release branch was created, merge it back into the mainline branch.
+
+Tag the commit with `vMajor.Minor.Patch` (e.g. `v8.2.1`) on the mainline branch.
+
+Push your changes to the redis/docker-library-redis repository.
+
+Create a PR to the official-library referring to the tag and commit you created.
+
+
+# Release automation tool
+
+The release automation tool is used to generate the library/redis file for the official-library. It uses the origin repository as the source of truth and follows the process described above.
+
+## Installation
+
+### From Source
+
+```bash
+cd release-automation
+pip install -e .
+``` + +### Development Installation + +```bash +cd release-automation +pip install -e ".[dev]" +``` + +## Usage + +```bash +release-automation --help +``` diff --git a/release-automation/docker/Dockerfile b/release-automation/docker/Dockerfile new file mode 100644 index 000000000..2a6f061c2 --- /dev/null +++ b/release-automation/docker/Dockerfile @@ -0,0 +1,13 @@ +FROM python:3.11-slim-trixie + +RUN apt update && apt -y install git && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* \ + # avoid dubious permissions problem in github CI + && git config --global --add safe.directory '*' + +COPY . /release-automation +RUN pip install -e /release-automation + +ENTRYPOINT ["release-automation"] +CMD ["--help"] diff --git a/release-automation/pyproject.toml b/release-automation/pyproject.toml new file mode 100644 index 000000000..50714fc96 --- /dev/null +++ b/release-automation/pyproject.toml @@ -0,0 +1,86 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "stackbrew-library-generator" +version = "0.1.0" +description = "Stackbrew Library Generator for Redis Docker Images" +authors = [ + {name = "Redis Team", email = "team@redis.io"}, +] +readme = "README.md" +requires-python = ">=3.8" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", +] +dependencies = [ + "typer[all]>=0.9.0", + "rich>=13.0.0", + "pydantic>=2.0.0", + "packaging>=21.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=7.0.0", + "pytest-cov>=4.0.0", + "black>=23.0.0", + "isort>=5.12.0", + "mypy>=1.0.0", + "pre-commit>=3.0.0", +] + +[project.scripts] +release-automation = "stackbrew_generator.cli:app" + +[project.urls] +Homepage = "https://github.com/redis/docker-library-redis" +Repository = "https://github.com/redis/docker-library-redis" +Issues = "https://github.com/redis/docker-library-redis/issues" + +[tool.hatch.build.targets.wheel] +packages = ["src/stackbrew_generator"] + +[tool.hatch.build.targets.sdist] +include = [ + "/src", + "/README.md", + "/pyproject.toml", +] + +[tool.black] +line-length = 88 +target-version = ['py38'] +include = '\.pyi?$' + +[tool.isort] +profile = "black" +multi_line_output = 3 + +[tool.mypy] +python_version = "3.8" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = [ + "--strict-markers", + "--strict-config", + "--cov=stackbrew_generator", + "--cov-report=term-missing", + "--cov-report=html", + "--cov-report=xml", +] diff --git a/release-automation/src/stackbrew_generator/__init__.py b/release-automation/src/stackbrew_generator/__init__.py new file mode 100644 index 000000000..38a1b1c6e --- /dev/null +++ b/release-automation/src/stackbrew_generator/__init__.py @@ -0,0 +1,3 @@ +"""Stackbrew Library Generator for Redis Docker Images.""" + +__version__ = "0.1.0" diff --git a/release-automation/src/stackbrew_generator/cli.py b/release-automation/src/stackbrew_generator/cli.py new file mode 100644 index 000000000..f572bee7a --- /dev/null +++ b/release-automation/src/stackbrew_generator/cli.py @@ -0,0 
+1,248 @@ +"""CLI interface for stackbrew library generator.""" + +import typer +from pathlib import Path +from rich.console import Console +from rich.traceback import install + +from .distribution import DistributionDetector +from .exceptions import StackbrewGeneratorError +from .git_operations import GitClient +from .logging_config import setup_logging +from .stackbrew import StackbrewGenerator, StackbrewUpdater +from .version_filter import VersionFilter + +# Install rich traceback handler +install(show_locals=True) + +app = typer.Typer( + name="release-automation", + help="Generate stackbrew library content for Redis Docker images", + add_completion=False, +) + +# Console for logging and user messages (stderr) +console = Console(stderr=True) + + +def _generate_stackbrew_content(major_version: int, remote: str, verbose: bool) -> str: + """Generate stackbrew content for a major version. + + This helper function contains the common logic for generating stackbrew content + that is used by both generate-stackbrew-content and update-stackbrew-file commands. + + Args: + major_version: Redis major version to process + remote: Git remote to use + verbose: Whether to enable verbose output + + Returns: + Generated stackbrew content as string + + Raises: + typer.Exit: If no versions found or other errors occur + """ + # Initialize components + git_client = GitClient(remote=remote) + version_filter = VersionFilter(git_client) + distribution_detector = DistributionDetector(git_client) + stackbrew_generator = StackbrewGenerator() + + # Get actual Redis versions to process + versions = version_filter.get_actual_major_redis_versions(major_version) + + if not versions: + console.print(f"[red]No versions found for Redis {major_version}.x[/red]") + raise typer.Exit(1) + + # Fetch required refs + refs_to_fetch = [commit for _, commit, _ in versions] + git_client.fetch_refs(refs_to_fetch) + + # Prepare releases list with distribution information + releases = distribution_detector.prepare_releases_list(versions) + + if not releases: + console.print("[red]No releases prepared[/red]") + raise typer.Exit(1) + + # Generate stackbrew library content + entries = stackbrew_generator.generate_stackbrew_library(releases) + output = stackbrew_generator.format_stackbrew_output(entries) + + if not output: + console.print("[yellow]No stackbrew content generated[/yellow]") + raise typer.Exit(1) + + if verbose: + console.print(f"[green]Generated stackbrew library with {len(entries)} entries[/green]") + + return output + + +@app.command(name="generate-stackbrew-content") +def generate_stackbrew_content( + major_version: int = typer.Argument( + ..., + help="Redis major version to process (e.g., 8 for Redis 8.x)" + ), + remote: str = typer.Option( + "origin", + "--remote", + help="Git remote to use for fetching tags and branches" + ), + verbose: bool = typer.Option( + False, + "--verbose", + "-v", + help="Enable verbose output" + ), +) -> None: + """Generate stackbrew library content for Redis Docker images. + + This command: + 1. Fetches Redis version tags from the specified remote + 2. Filters versions to remove EOL and select latest patches + 3. Extracts distribution information from Dockerfiles + 4. Generates appropriate Docker tags for each version/distribution + 5. 
Outputs stackbrew library content + """ + # Set up logging + setup_logging(verbose=verbose, console=console) + + if verbose: + console.print(f"[bold blue]Stackbrew Library Generator[/bold blue]") + console.print(f"Major version: {major_version}") + console.print(f"Remote: {remote}") + + try: + # Generate stackbrew content using the helper function + output = _generate_stackbrew_content(major_version, remote, verbose) + + # Output the stackbrew library content + print(output) + + except StackbrewGeneratorError as e: + if verbose and hasattr(e, 'get_detailed_message'): + console.print(f"[red]{e.get_detailed_message()}[/red]") + else: + console.print(f"[red]Error: {e}[/red]") + if verbose: + console.print_exception() + raise typer.Exit(1) + except KeyboardInterrupt: + console.print("\n[yellow]Operation cancelled by user[/yellow]") + raise typer.Exit(130) + except Exception as e: + console.print(f"[red]Unexpected error: {e}[/red]") + if verbose: + console.print_exception() + raise typer.Exit(1) + + +@app.command() +def version() -> None: + """Show version information.""" + from . import __version__ + console.print(f"stackbrew-library-generator {__version__}") + + +@app.command() +def update_stackbrew_file( + major_version: int = typer.Argument( + ..., + help="Redis major version to update (e.g., 8 for Redis 8.x)" + ), + input_file: Path = typer.Option( + ..., + "--input", + "-i", + help="Path to the stackbrew library file to update" + ), + output_file: Path = typer.Option( + None, + "--output", + "-o", + help="Output file path (defaults to stdout)" + ), + remote: str = typer.Option( + "origin", + "--remote", + help="Git remote to use for fetching tags and branches" + ), + verbose: bool = typer.Option( + False, + "--verbose", + "-v", + help="Enable verbose output" + ), +) -> None: + """Update stackbrew library file by replacing entries for a specific major version. + + This command: + 1. Reads the existing stackbrew library file + 2. Generates new stackbrew content for the specified major version + 3. Replaces all entries related to that major version in their original position + 4. Preserves the header and entries for other major versions + 5. 
Outputs to stdout by default, or to specified output file + """ + # Set up logging + setup_logging(verbose=verbose, console=console) + + if not input_file.exists(): + console.print(f"[red]Input file does not exist: {input_file}[/red]") + raise typer.Exit(1) + + if verbose: + console.print(f"[bold blue]Stackbrew Library File Updater[/bold blue]") + console.print(f"Input file: {input_file}") + if output_file: + console.print(f"Output file: {output_file}") + else: + console.print("Output: stdout") + console.print(f"Major version: {major_version}") + console.print(f"Remote: {remote}") + + try: + # Generate new stackbrew content for the major version using helper function + new_content = _generate_stackbrew_content(major_version, remote, verbose) + + # Update the stackbrew file content + updater = StackbrewUpdater() + updated_content = updater.update_stackbrew_content( + input_file, major_version, new_content, verbose + ) + + # Write the updated content + if output_file: + output_file.write_text(updated_content, encoding='utf-8') + if verbose: + console.print(f"[green]Successfully updated {output_file} for Redis {major_version}.x[/green]") + else: + console.print(f"[green]Updated {output_file}[/green]") + else: + # Output to stdout + print(updated_content) + if verbose: + console.print(f"[green]Generated updated stackbrew content for Redis {major_version}.x[/green]") + + except StackbrewGeneratorError as e: + if verbose and hasattr(e, 'get_detailed_message'): + console.print(f"[red]{e.get_detailed_message()}[/red]") + else: + console.print(f"[red]Error: {e}[/red]") + if verbose: + console.print_exception() + raise typer.Exit(1) + except KeyboardInterrupt: + console.print("\n[yellow]Operation cancelled by user[/yellow]") + raise typer.Exit(130) + except Exception as e: + console.print(f"[red]Unexpected error: {e}[/red]") + if verbose: + console.print_exception() + raise typer.Exit(1) + + +if __name__ == "__main__": + app() diff --git a/release-automation/src/stackbrew_generator/distribution.py b/release-automation/src/stackbrew_generator/distribution.py new file mode 100644 index 000000000..5e2080e8e --- /dev/null +++ b/release-automation/src/stackbrew_generator/distribution.py @@ -0,0 +1,118 @@ +"""Distribution detection from Dockerfiles.""" + +from typing import List, Tuple + +from rich.console import Console + +from .exceptions import DistributionError +from .git_operations import GitClient +from .models import Distribution, RedisVersion, Release + +console = Console(stderr=True) + + +class DistributionDetector: + """Detects distribution information from Dockerfiles.""" + + def __init__(self, git_client: GitClient): + """Initialize distribution detector. + + Args: + git_client: Git client for operations + """ + self.git_client = git_client + + def extract_distribution_from_dockerfile(self, dockerfile_content: str) -> Distribution: + """Extract distribution information from Dockerfile content. 
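+
+        For example (illustrative): content whose first FROM line is
+        "FROM debian:bookworm-slim" yields a Distribution with type DEBIAN
+        and name "bookworm".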
+ + Args: + dockerfile_content: Content of the Dockerfile + + Returns: + Distribution instance + + Raises: + DistributionError: If distribution cannot be detected + """ + # Find the FROM line + from_line = None + for line in dockerfile_content.split('\n'): + line = line.strip() + if line.upper().startswith('FROM '): + from_line = line + break + + if not from_line: + raise DistributionError("No FROM line found in Dockerfile") + + try: + return Distribution.from_dockerfile_line(from_line) + except ValueError as e: + raise DistributionError(f"Failed to parse distribution from FROM line: {e}") from e + + def get_distribution_for_commit(self, commit: str, distro_type: str) -> Distribution: + """Get distribution information for a specific commit and distro type. + + Args: + commit: Git commit hash + distro_type: Distribution type ("debian" or "alpine") + + Returns: + Distribution instance + + Raises: + DistributionError: If distribution cannot be detected + """ + dockerfile_path = f"{distro_type}/Dockerfile" + + try: + dockerfile_content = self.git_client.show_file(commit, dockerfile_path) + console.print(f"[dim]Retrieved {dockerfile_path} from {commit[:8]}[/dim]") + + distribution = self.extract_distribution_from_dockerfile(dockerfile_content) + console.print(f"[dim]Detected distribution: {distribution.type.value} {distribution.name}[/dim]") + + return distribution + + except Exception as e: + raise DistributionError( + f"Failed to get distribution for {distro_type} from {commit}: {e}" + ) from e + + def prepare_releases_list(self, versions: List[Tuple[RedisVersion, str, str]]) -> List[Release]: + """Prepare list of releases with distribution information. + + Args: + versions: List of (RedisVersion, commit, tag_ref) tuples + + Returns: + List of Release objects with distribution information + """ + console.print("[blue]Preparing releases list with distribution information[/blue]") + + releases = [] + distro_types = ["debian", "alpine"] + + for version, commit, tag_ref in versions: + console.print(f"[dim]Processing [bold yellow]{version}[/bold yellow] - {commit[:8]}[/dim]") + + for distro_type in distro_types: + try: + distribution = self.get_distribution_for_commit(commit, distro_type) + + release = Release( + commit=commit, + version=version, + distribution=distribution, + git_fetch_ref=tag_ref + ) + + releases.append(release) + console.print(f"[dim] Added: {release.console_repr()}[/dim]", highlight=False) + + except DistributionError as e: + console.print(f"[yellow]Warning: Failed to process {distro_type} for {version}: {e}[/yellow]") + continue + + console.print(f"[green]Prepared {len(releases)} releases[/green]") + return releases diff --git a/release-automation/src/stackbrew_generator/exceptions.py b/release-automation/src/stackbrew_generator/exceptions.py new file mode 100644 index 000000000..f5b11b2a6 --- /dev/null +++ b/release-automation/src/stackbrew_generator/exceptions.py @@ -0,0 +1,138 @@ +"""Custom exceptions for stackbrew library generation.""" + +from typing import Optional, Any, Dict + + +class StackbrewGeneratorError(Exception): + """Base exception for stackbrew generator errors. + + Provides structured error information with context and suggestions. + """ + + def __init__( + self, + message: str, + context: Optional[Dict[str, Any]] = None, + suggestion: Optional[str] = None, + original_error: Optional[Exception] = None + ): + """Initialize error with context. 
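+
+        Example (illustrative):
+            raise StackbrewGeneratorError(
+                "Tag not found",
+                context={"tag": "v8.2.1"},
+                suggestion="Fetch tags from the remote first",
+            )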
+
+        Args:
+            message: Error message
+            context: Additional context information
+            suggestion: Suggested fix or next steps
+            original_error: Original exception that caused this error
+        """
+        super().__init__(message)
+        self.context = context or {}
+        self.suggestion = suggestion
+        self.original_error = original_error
+
+    def get_detailed_message(self) -> str:
+        """Get detailed error message with context and suggestions."""
+        parts = [str(self)]
+
+        if self.context:
+            parts.append("Context:")
+            for key, value in self.context.items():
+                parts.append(f"  {key}: {value}")
+
+        if self.suggestion:
+            parts.append(f"Suggestion: {self.suggestion}")
+
+        if self.original_error:
+            parts.append(f"Original error: {self.original_error}")
+
+        return "\n".join(parts)
+
+
+class GitOperationError(StackbrewGeneratorError):
+    """Exception raised for Git operation failures."""
+
+    def __init__(
+        self,
+        message: str,
+        command: Optional[str] = None,
+        exit_code: Optional[int] = None,
+        **kwargs
+    ):
+        # pop() so these keys are not passed a second time via **kwargs below
+        context = kwargs.pop('context', {})
+        if command:
+            context['command'] = command
+        if exit_code is not None:
+            context['exit_code'] = exit_code
+
+        suggestion = kwargs.pop('suggestion', None)
+        if not suggestion and command:
+            if 'ls-remote' in command:
+                suggestion = "Check that the remote repository exists and is accessible"
+            elif 'fetch' in command:
+                suggestion = "Ensure you have network access and proper Git credentials"
+            elif 'show' in command:
+                suggestion = "Verify that the commit exists and contains the requested file"
+
+        super().__init__(message, context=context, suggestion=suggestion, **kwargs)
+
+
+class VersionParsingError(StackbrewGeneratorError):
+    """Exception raised for version parsing failures."""
+
+    def __init__(self, message: str, version_string: Optional[str] = None, **kwargs):
+        context = kwargs.pop('context', {})
+        if version_string:
+            context['version_string'] = version_string
+
+        suggestion = kwargs.pop('suggestion',
+            "Version should be in format 'X.Y.Z' or 'vX.Y.Z' with optional suffix")
+
+        super().__init__(message, context=context, suggestion=suggestion, **kwargs)
+
+
+class DistributionError(StackbrewGeneratorError):
+    """Exception raised for distribution detection failures."""
+
+    def __init__(
+        self,
+        message: str,
+        dockerfile_path: Optional[str] = None,
+        from_line: Optional[str] = None,
+        **kwargs
+    ):
+        context = kwargs.pop('context', {})
+        if dockerfile_path:
+            context['dockerfile_path'] = dockerfile_path
+        if from_line:
+            context['from_line'] = from_line
+
+        suggestion = kwargs.pop('suggestion',
+            "Dockerfile should have a FROM line with supported base image (alpine:* or debian:*)")
+
+        super().__init__(message, context=context, suggestion=suggestion, **kwargs)
+
+
+class ValidationError(StackbrewGeneratorError):
+    """Exception raised for validation failures."""
+
+    def __init__(self, message: str, field: Optional[str] = None, value: Optional[Any] = None, **kwargs):
+        context = kwargs.pop('context', {})
+        if field:
+            context['field'] = field
+        if value is not None:
+            context['value'] = value
+
+        super().__init__(message, context=context, **kwargs)
+
+
+class ConfigurationError(StackbrewGeneratorError):
+    """Exception raised for configuration errors."""
+
+    def __init__(self, message: str, config_key: Optional[str] = None, **kwargs):
+        context = kwargs.pop('context', {})
+        if config_key:
+            context['config_key'] = config_key
+
+        suggestion = kwargs.pop('suggestion',
+            "Check your configuration and environment variables")
+
+        super().__init__(message, context=context,
suggestion=suggestion, **kwargs) diff --git a/release-automation/src/stackbrew_generator/git_operations.py b/release-automation/src/stackbrew_generator/git_operations.py new file mode 100644 index 000000000..14dab035c --- /dev/null +++ b/release-automation/src/stackbrew_generator/git_operations.py @@ -0,0 +1,156 @@ +"""Git operations for stackbrew library generation.""" + +import re +import subprocess +from typing import Dict, List, Tuple + +from rich.console import Console + +from .exceptions import GitOperationError +from .models import RedisVersion + +console = Console(stderr=True) + + +class GitClient: + """Client for Git operations.""" + + def __init__(self, remote: str = "origin"): + """Initialize Git client. + + Args: + remote: Git remote name to use + """ + self.remote = remote + + def _run_command(self, cmd: List[str], capture_output: bool = True) -> subprocess.CompletedProcess: + """Run a git command with error handling. + + Args: + cmd: Command and arguments to run + capture_output: Whether to capture stdout/stderr + + Returns: + CompletedProcess result + + Raises: + GitOperationError: If command fails + """ + try: + result = subprocess.run( + cmd, + capture_output=capture_output, + text=True, + check=True, + ) + return result + except subprocess.CalledProcessError as e: + error_msg = f"Git command failed: {' '.join(cmd)}" + if e.stderr: + error_msg += f"\nError: {e.stderr.strip()}" + raise GitOperationError(error_msg) from e + except FileNotFoundError as e: + raise GitOperationError("Git command not found. Is git installed?") from e + + def list_remote_tags(self, major_version: int) -> List[Tuple[str, str]]: + """List remote tags for a specific major version. + + Args: + major_version: Major version to filter tags for + + Returns: + List of (commit, tag_ref) tuples + + Raises: + GitOperationError: If no tags found or git operation fails + """ + console.print(f"[dim]Listing remote tags for v{major_version}.*[/dim]") + + cmd = [ + "git", "ls-remote", "--refs", "--tags", + self.remote, f"refs/tags/v{major_version}.*" + ] + + result = self._run_command(cmd) + + if not result.stdout.strip(): + raise GitOperationError(f"No tags found for major version {major_version}") + + tags = [] + for line in result.stdout.strip().split('\n'): + if line: + commit, ref = line.split('\t', 1) + tags.append((commit, ref)) + + console.print(f"[dim]Found {len(tags)} tags[/dim]") + return tags + + def fetch_refs(self, refs: List[str]) -> None: + """Fetch specific refs from remote. + + Args: + refs: List of refs to fetch + + Raises: + GitOperationError: If fetch operation fails + """ + if not refs: + return + + console.print(f"[dim]Fetching {len(refs)} refs[/dim]") + + # Use git fetch with unshallow to ensure we have full history + cmd = ["git", "fetch", "--unshallow", self.remote] + refs + + try: + self._run_command(cmd, capture_output=False) + except GitOperationError: + # If --unshallow fails (repo already unshallow), try without it + cmd = ["git", "fetch", self.remote] + refs + self._run_command(cmd, capture_output=False) + + def show_file(self, commit: str, file_path: str) -> str: + """Show file content from a specific commit. 
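+
+        Equivalent to running "git show <commit>:<file_path>" and capturing
+        its stdout.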
+ + Args: + commit: Git commit hash + file_path: Path to file in repository + + Returns: + File content as string + + Raises: + GitOperationError: If file cannot be retrieved + """ + cmd = ["git", "show", f"{commit}:{file_path}"] + + try: + result = self._run_command(cmd) + return result.stdout + except GitOperationError as e: + raise GitOperationError(f"Failed to get {file_path} from {commit}: {e}") from e + + def extract_version_from_tag(self, tag_ref: str, major_version: int) -> RedisVersion: + """Extract Redis version from tag reference. + + Args: + tag_ref: Git tag reference (e.g., refs/tags/v8.2.1) + major_version: Expected major version for validation + + Returns: + Parsed RedisVersion + + Raises: + GitOperationError: If tag format is invalid + """ + # Extract version from tag reference + match = re.search(rf"v{major_version}\.\d+(?:\.\d+)?.*", tag_ref) + if not match: + raise GitOperationError(f"Invalid tag format: {tag_ref}") + + version_str = match.group(0) + + try: + return RedisVersion.parse(version_str) + except ValueError as e: + raise GitOperationError(f"Failed to parse version from {tag_ref}: {e}") from e diff --git a/release-automation/src/stackbrew_generator/logging_config.py b/release-automation/src/stackbrew_generator/logging_config.py new file mode 100644 index 000000000..b261e7ae7 --- /dev/null +++ b/release-automation/src/stackbrew_generator/logging_config.py @@ -0,0 +1,95 @@ +"""Logging configuration for stackbrew generator.""" + +import logging +from typing import Optional + +from rich.console import Console +from rich.logging import RichHandler + + +def setup_logging( + level: str = "INFO", + verbose: bool = False, + console: Optional[Console] = None +) -> logging.Logger: + """Set up logging configuration. + + Args: + level: Logging level (DEBUG, INFO, WARNING, ERROR) + verbose: Enable verbose logging + console: Rich console instance to use (should use stderr) + + Returns: + Configured logger instance + """ + if console is None: + # Create console that outputs to stderr + console = Console(stderr=True) + + # Determine log level + if verbose: + log_level = logging.DEBUG + else: + log_level = getattr(logging, level.upper(), logging.INFO) + + # Configure root logger + logging.basicConfig( + level=log_level, + format="%(message)s", + datefmt="[%X]", + handlers=[ + RichHandler( + console=console, + show_path=verbose, + show_time=verbose, + rich_tracebacks=True, + tracebacks_show_locals=verbose, + ) + ], + ) + + # Get logger for our package + logger = logging.getLogger("stackbrew_generator") + logger.setLevel(log_level) + + return logger + + +def get_logger(name: str) -> logging.Logger: + """Get a logger instance for a specific module. 
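+
+    Example (illustrative):
+        logger = get_logger(__name__)
+        logger.debug("fetched %d tags", count)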
+ + Args: + name: Logger name (usually __name__) + + Returns: + Logger instance + """ + return logging.getLogger(f"stackbrew_generator.{name}") + + +class LoggingMixin: + """Mixin class to add logging capabilities to other classes.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.logger = get_logger(self.__class__.__name__) + + def log_debug(self, message: str, *args, **kwargs) -> None: + """Log debug message.""" + self.logger.debug(message, *args, **kwargs) + + def log_info(self, message: str, *args, **kwargs) -> None: + """Log info message.""" + self.logger.info(message, *args, **kwargs) + + def log_warning(self, message: str, *args, **kwargs) -> None: + """Log warning message.""" + self.logger.warning(message, *args, **kwargs) + + def log_error(self, message: str, *args, **kwargs) -> None: + """Log error message.""" + self.logger.error(message, *args, **kwargs) + + def log_exception(self, message: str, *args, **kwargs) -> None: + """Log exception with traceback.""" + self.logger.exception(message, *args, **kwargs) diff --git a/release-automation/src/stackbrew_generator/models.py b/release-automation/src/stackbrew_generator/models.py new file mode 100644 index 000000000..8a9a1fff7 --- /dev/null +++ b/release-automation/src/stackbrew_generator/models.py @@ -0,0 +1,206 @@ +"""Data models for stackbrew library generation.""" + +import re +from enum import Enum +from typing import List, Optional + +from pydantic import BaseModel, Field, validator + + +class DistroType(str, Enum): + """Distribution type enumeration.""" + + ALPINE = "alpine" + DEBIAN = "debian" + + +class RedisVersion(BaseModel): + """Represents a parsed Redis version.""" + + major: int = Field(..., ge=1, description="Major version number") + minor: int = Field(..., ge=0, description="Minor version number") + patch: Optional[int] = Field(None, ge=0, description="Patch version number") + suffix: str = Field("", description="Version suffix (e.g., -m01, -rc1, -eol)") + + @classmethod + def parse(cls, version_str: str) -> "RedisVersion": + """Parse a version string into components. 
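+
+        For example (illustrative): "v8.2.1-m01" parses to major=8, minor=2,
+        patch=1, suffix="-m01"; "8.2" parses to major=8, minor=2, patch=None.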
+
+        Args:
+            version_str: Version string (e.g., "v8.2.1-m01", "8.2", "7.4.0-eol")
+
+        Returns:
+            RedisVersion instance
+
+        Raises:
+            ValueError: If version string format is invalid
+        """
+        # Remove 'v' prefix if present
+        version = version_str.lstrip("v")
+
+        # Extract numeric part and suffix
+        match = re.match(r"^([1-9]\d*\.\d+(?:\.\d+)?)(.*)", version)
+        if not match:
+            raise ValueError(f"Invalid version format: {version_str}")
+
+        numeric_part, suffix = match.groups()
+
+        # Parse numeric components
+        parts = numeric_part.split(".")
+        major = int(parts[0])
+        minor = int(parts[1])
+        patch = int(parts[2]) if len(parts) > 2 else None
+
+        return cls(major=major, minor=minor, patch=patch, suffix=suffix)
+
+    @property
+    def is_milestone(self) -> bool:
+        """Check if this is a milestone version (has suffix)."""
+        return bool(self.suffix)
+
+    @property
+    def is_eol(self) -> bool:
+        """Check if this version is end-of-life."""
+        return self.suffix.lower().endswith("-eol")
+
+    @property
+    def mainline_version(self) -> str:
+        """Get the mainline version string (major.minor)."""
+        return f"{self.major}.{self.minor}"
+
+    @property
+    def sort_key(self) -> tuple:
+        """Sort key: rc suffixes outrank m suffixes, which outrank any other
+        suffix; ties are broken by the raw suffix string."""
+        # Strip the leading "-" so suffixes like "-rc1"/"-m01" are recognized,
+        # and return a tuple so numeric parts compare numerically
+        # (e.g. 8.2.10 sorts above 8.2.9).
+        suffix = self.suffix.lstrip("-")
+        suffix_weight = 0
+        if suffix.startswith("rc"):
+            suffix_weight = 100
+        elif suffix.startswith("m"):
+            suffix_weight = 50
+
+        return (self.major, self.minor, self.patch or 0, suffix_weight, self.suffix)
+
+    def __str__(self) -> str:
+        """String representation of the version."""
+        version = f"{self.major}.{self.minor}"
+        if self.patch is not None:
+            version += f".{self.patch}"
+        return version + self.suffix
+
+    def __lt__(self, other: "RedisVersion") -> bool:
+        """Compare versions for sorting."""
+        if not isinstance(other, RedisVersion):
+            return NotImplemented
+
+        # Compare major.minor.patch first
+        self_tuple = (self.major, self.minor, self.patch or 0)
+        other_tuple = (other.major, other.minor, other.patch or 0)
+
+        if self_tuple != other_tuple:
+            return self_tuple < other_tuple
+
+        # If numeric parts are equal, compare suffixes
+        # Empty suffix (GA) comes after suffixes (milestones)
+        if not self.suffix and other.suffix:
+            return False
+        if self.suffix and not other.suffix:
+            return True
+
+        return self.suffix < other.suffix
+
+
+class Distribution(BaseModel):
+    """Represents a Linux distribution."""
+
+    type: DistroType = Field(..., description="Distribution type")
+    name: str = Field(..., description="Distribution name/version")
+
+    @classmethod
+    def from_dockerfile_line(cls, from_line: str) -> "Distribution":
+        """Parse distribution from Dockerfile FROM line.
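+
+        For example (illustrative): "FROM alpine:3.22" yields type ALPINE with
+        name "alpine3.22"; "FROM debian:bookworm-slim" yields type DEBIAN with
+        name "bookworm".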
+ + Args: + from_line: FROM line from Dockerfile (e.g., "FROM alpine:3.22") + + Returns: + Distribution instance + + Raises: + ValueError: If FROM line format is not supported + """ + # Extract base image from FROM line + parts = from_line.strip().split() + if len(parts) < 2 or parts[0].upper() != "FROM": + raise ValueError(f"Invalid FROM line: {from_line}") + + base_img = parts[1] + + if "alpine:" in base_img: + # Extract alpine version (e.g., alpine:3.22 -> alpine3.22) + version = base_img.split(":", 1)[1] + return cls(type=DistroType.ALPINE, name=f"alpine{version}") + elif "debian:" in base_img: + # Extract debian version, remove -slim suffix + version = base_img.split(":", 1)[1].replace("-slim", "") + return cls(type=DistroType.DEBIAN, name=version) + else: + raise ValueError(f"Unsupported base image: {base_img}") + + @property + def is_default(self) -> bool: + """Check if this is the default distribution (Debian).""" + return self.type == DistroType.DEBIAN + + @property + def tag_names(self) -> List[str]: + """Get tag name components for this distribution.""" + if self.type == DistroType.ALPINE: + return [self.type.value, self.name] + else: + return [self.name] + + +class Release(BaseModel): + """Represents a Redis release with distribution information.""" + + commit: str = Field(..., description="Git commit hash") + version: RedisVersion = Field(..., description="Redis version") + distribution: Distribution = Field(..., description="Linux distribution") + git_fetch_ref: str = Field(..., description="Git fetch reference (e.g., refs/tags/v8.2.1)") + + def __str__(self) -> str: + """String representation of the release.""" + return f"{self.commit[:8]} {self.version} {self.distribution.type.value} {self.distribution.name}" + + def console_repr(self) -> str: + """Rich console representation with markup.""" + return f"{self.commit[:8]} [bold yellow]{self.version}[/bold yellow] {self.distribution.type.value} [bold yellow]{self.distribution.name}[/bold yellow]" + + +class StackbrewEntry(BaseModel): + """Represents a stackbrew library entry with tags.""" + + tags: List[str] = Field(..., description="Docker tags for this entry") + commit: str = Field(..., description="Git commit hash") + version: RedisVersion = Field(..., description="Redis version") + distribution: Distribution = Field(..., description="Linux distribution") + git_fetch_ref: str = Field(..., description="Git fetch reference (e.g., refs/tags/v8.2.1)") + + @property + def architectures(self) -> List[str]: + """Get supported architectures based on distribution type.""" + if self.distribution.type == DistroType.DEBIAN: + return ["amd64", "arm32v5", "arm32v7", "arm64v8", "i386", "mips64le", "ppc64le", "s390x"] + elif self.distribution.type == DistroType.ALPINE: + return ["amd64", "arm32v6", "arm32v7", "arm64v8", "i386", "ppc64le", "riscv64", "s390x"] + else: + # Fallback to debian architectures for unknown distributions + return ["amd64", "arm32v5", "arm32v7", "arm64v8", "i386", "mips64le", "ppc64le", "s390x"] + + def __str__(self) -> str: + """String representation in stackbrew format.""" + lines = [] + lines.append(f"Tags: {', '.join(self.tags)}") + lines.append(f"Architectures: {', '.join(self.architectures)}") + lines.append(f"GitCommit: {self.commit}") + lines.append(f"GitFetch: {self.git_fetch_ref}") + lines.append(f"Directory: {self.distribution.type.value}") + return "\n".join(lines) diff --git a/release-automation/src/stackbrew_generator/stackbrew.py b/release-automation/src/stackbrew_generator/stackbrew.py new file mode 
100644 index 000000000..faac43b94 --- /dev/null +++ b/release-automation/src/stackbrew_generator/stackbrew.py @@ -0,0 +1,295 @@ +"""Stackbrew library generation.""" + +import re +from pathlib import Path +from typing import List + +from rich.console import Console + +from .models import Release, StackbrewEntry + +console = Console(stderr=True) + + +class StackbrewGenerator: + """Generates stackbrew library content.""" + + def generate_tags_for_release( + self, + release: Release, + is_latest: bool = False + ) -> List[str]: + """Generate Docker tags for a release. + + Args: + release: Release to generate tags for + is_latest: Whether this is the latest version + + Returns: + List of Docker tags + """ + tags = [] + version = release.version + distribution = release.distribution + + # Base version tags + version_tags = [str(version)] + + # Add mainline version tag only for GA releases (no suffix) + if not version.is_milestone: + version_tags.append(version.mainline_version) + + # Add major version tag for latest versions + if is_latest: + version_tags.append(str(version.major)) + + # For default distribution (Debian), add version tags without distro suffix + if distribution.is_default: + tags.extend(version_tags) + + # Add distro-specific tags + for distro_name in distribution.tag_names: + for version_tag in version_tags: + tags.append(f"{version_tag}-{distro_name}") + + # Add special latest tags + if is_latest: + if distribution.is_default: + tags.append("latest") + # Add bare distro names as tags + tags.extend(distribution.tag_names) + + return tags + + def generate_stackbrew_library(self, releases: List[Release]) -> List[StackbrewEntry]: + """Generate stackbrew library entries from releases. + + Args: + releases: List of releases to process + + Returns: + List of StackbrewEntry objects + """ + console.print("[blue]Generating stackbrew library content[/blue]") + + if not releases: + console.print("[yellow]No releases to process[/yellow]") + return [] + + entries = [] + latest_minor = None + latest_minor_unset = True + + for release in releases: + # Determine latest version following bash logic: + # - Set latest_minor to the minor version of the first non-milestone version + # - Clear latest_minor if subsequent versions have different minor versions + if latest_minor_unset: + if not release.version.is_milestone: + latest_minor = release.version.minor + latest_minor_unset = False + console.print(f"[dim]Latest minor version set to: {latest_minor}[/dim]") + elif latest_minor != release.version.minor: + latest_minor = None + + # Check if this release should get latest tags + is_latest = latest_minor is not None + + # Generate tags for this release + tags = self.generate_tags_for_release(release, is_latest) + + if tags: + entry = StackbrewEntry( + tags=tags, + commit=release.commit, + version=release.version, + distribution=release.distribution, + git_fetch_ref=release.git_fetch_ref + ) + entries.append(entry) + + console.print(f"[dim]{release.console_repr()} -> {len(tags)} tags[/dim]") + else: + console.print(f"[yellow]No tags generated for {release}[/yellow]") + + console.print(f"[green]Generated {len(entries)} stackbrew entries[/green]") + console.print(f"[dim]{self.format_stackbrew_output(entries)}[/dim]") + return entries + + def format_stackbrew_output(self, entries: List[StackbrewEntry]) -> str: + """Format stackbrew entries as output string. 
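+
+        Each entry renders in stackbrew format, for example (illustrative):
+
+            Tags: 8.2.1, 8.2, 8.2.1-bookworm, 8.2-bookworm
+            Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
+            GitCommit: <commit sha>
+            GitFetch: refs/tags/v8.2.1
+            Directory: debian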
+ + Args: + entries: List of stackbrew entries + + Returns: + Formatted stackbrew library content + """ + if not entries: + return "" + + lines = [] + for i, entry in enumerate(entries): + if i > 0: + lines.append("") # Add blank line between entries + lines.append(str(entry)) + + return "\n".join(lines) + + +class StackbrewUpdater: + """Updates stackbrew library files by replacing entries for specific major versions.""" + + def __init__(self): + """Initialize the updater.""" + pass + + def update_stackbrew_content(self, input_file: Path, major_version: int, new_content: str, verbose: bool = False) -> str: + """Update stackbrew file content by replacing entries for a specific major version. + + Args: + input_file: Path to the input stackbrew file + major_version: Major version to replace entries for + new_content: New stackbrew content to insert + verbose: Whether to print verbose output + + Returns: + Updated stackbrew file content + """ + content = input_file.read_text(encoding='utf-8') + lines = content.split('\n') + + # Find header (everything before the first Tags: line) + header_lines = [] + content_start_idx = 0 + + for i, line in enumerate(lines): + if line.startswith('Tags:'): + content_start_idx = i + break + header_lines.append(line) + + if content_start_idx == 0 and not any(line.startswith('Tags:') for line in lines): + # No existing entries, just append new content + if verbose: + console.print("[dim]No existing entries found, appending new content[/dim]") + return content.rstrip() + '\n\n' + new_content + + # Parse entries and find where target major version entries start and end + entries = self._parse_stackbrew_entries(lines[content_start_idx:]) + target_entries = [] + other_entries_before = [] + other_entries_after = [] + target_start_found = False + target_end_found = False + removed_count = 0 + + for entry in entries: + if self._entry_belongs_to_major_version(entry, major_version): + target_entries.append(entry) + removed_count += 1 + if not target_start_found: + target_start_found = True + elif not target_start_found: + # Entries before target major version + other_entries_before.append(entry) + else: + # Entries after target major version + other_entries_after.append(entry) + if not target_end_found: + target_end_found = True + + if verbose: + if removed_count > 0: + console.print(f"[dim]Removed {removed_count} existing entries for Redis {major_version}.x[/dim]") + else: + console.print(f"[dim]No existing entries found for Redis {major_version}.x, placing at end[/dim]") + + # Reconstruct the file + result_lines = header_lines[:] + + # Add entries before target major version + for entry in other_entries_before: + if result_lines and result_lines[-1].strip(): # Add blank line if needed + result_lines.append('') + result_lines.extend(entry) + + # Add new content for the target major version + if result_lines and result_lines[-1].strip(): # Add blank line if needed + result_lines.append('') + result_lines.extend(new_content.split('\n')) + + # Add entries after target major version + for entry in other_entries_after: + if result_lines and result_lines[-1].strip(): # Add blank line if needed + result_lines.append('') + result_lines.extend(entry) + + return '\n'.join(result_lines) + + def _parse_stackbrew_entries(self, lines: List[str]) -> List[List[str]]: + """Parse stackbrew entries from lines, returning list of entry line groups. 
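+
+        An entry starts at a "Tags:" line and extends over the following
+        "Architectures:"/"GitCommit:"/"GitFetch:"/"Directory:" lines until a
+        blank line or the next "Tags:" line (illustrative summary of the
+        parsing below).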
+ + Args: + lines: Lines to parse + + Returns: + List of entry line groups + """ + entries = [] + current_entry = [] + + for line in lines: + line = line.rstrip() + + if line.startswith('Tags:') and current_entry: + # Start of new entry, save the previous one + entries.append(current_entry) + current_entry = [line] + elif line.startswith('Tags:'): + # First entry + current_entry = [line] + elif current_entry and (line.startswith(('Architectures:', 'GitCommit:', 'GitFetch:', 'Directory:')) or line.strip() == ''): + # Part of current entry + current_entry.append(line) + elif not line.strip() and not current_entry: + # Empty line before any entry starts, skip + continue + elif not line.strip() and current_entry: + # Empty line after entry content - end of entry + if current_entry: + entries.append(current_entry) + current_entry = [] + + # Don't forget the last entry + if current_entry: + entries.append(current_entry) + + return entries + + def _entry_belongs_to_major_version(self, entry_lines: List[str], major_version: int) -> bool: + """Check if a stackbrew entry belongs to the specified major version. + + Args: + entry_lines: Lines of the stackbrew entry + major_version: Major version to check for + + Returns: + True if the entry belongs to the major version + """ + for line in entry_lines: + if line.startswith('Tags:'): + tags_line = line[5:].strip() # Remove 'Tags:' prefix + tags = [tag.strip() for tag in tags_line.split(',')] + + # Check if any tag indicates this major version + for tag in tags: + # Look for patterns like "8", "8.2", "8.2.1", "8-alpine", etc. + if re.match(rf'^{major_version}(?:\.|$|-)', tag): + return True + # Also check for "latest" tag which typically belongs to the highest major version + # But we'll be conservative and not assume latest belongs to our major version + # unless we have other evidence + break + + return False diff --git a/release-automation/src/stackbrew_generator/version_filter.py b/release-automation/src/stackbrew_generator/version_filter.py new file mode 100644 index 000000000..443ca1818 --- /dev/null +++ b/release-automation/src/stackbrew_generator/version_filter.py @@ -0,0 +1,156 @@ +"""Version filtering and processing for Redis releases.""" + +from typing import Dict, List, Tuple + +from collections import OrderedDict + +from packaging.version import Version +from rich.console import Console + +from .git_operations import GitClient +from .models import RedisVersion + +console = Console(stderr=True) + + +class VersionFilter: + """Filters and processes Redis versions.""" + + def __init__(self, git_client: GitClient): + """Initialize version filter. + + Args: + git_client: Git client for operations + """ + self.git_client = git_client + + def get_redis_versions_from_tags(self, major_version: int) -> List[Tuple[RedisVersion, str, str]]: + """Get Redis versions from git tags. 
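+
+        For example (illustrative): a remote tag refs/tags/v8.2.1-m01 on
+        commit abc1234 yields the tuple (<parsed 8.2.1-m01 version>,
+        "abc1234", "refs/tags/v8.2.1-m01").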
+ + Args: + major_version: Major version to filter for + + Returns: + List of (RedisVersion, commit, tag_ref) tuples sorted by version (newest first) + """ + console.print(f"[blue]Getting Redis versions for major version {major_version}[/blue]") + + # Get remote tags + tags = self.git_client.list_remote_tags(major_version) + + # Parse versions from tags + versions = [] + for commit, tag_ref in tags: + try: + version = self.git_client.extract_version_from_tag(tag_ref, major_version) + versions.append((version, commit, tag_ref)) + except Exception as e: + console.print(f"[yellow]Warning: Skipping invalid tag {tag_ref}: {e}[/yellow]") + continue + + # Sort by version (newest first) + versions.sort(key=lambda x: x[0].sort_key, reverse=True) + + console.print(f"[dim]Parsed {len(versions)} valid versions[/dim]") + return versions + + + def filter_eol_versions(self, versions: List[Tuple[RedisVersion, str, str]]) -> List[Tuple[RedisVersion, str, str]]: + """Filter out end-of-life versions. + + Args: + versions: List of (RedisVersion, commit, tag_ref) tuples + + Returns: + Filtered list with EOL minor versions removed + """ + console.print("[blue]Filtering out EOL versions[/blue]") + + # Group versions by minor version + minor_versions: Dict[str, List[Tuple[RedisVersion, str, str]]] = {} + for version, commit, tag_ref in versions: + minor_key = version.mainline_version + if minor_key not in minor_versions: + minor_versions[minor_key] = [] + minor_versions[minor_key].append((version, commit, tag_ref)) + + # Check each minor version for EOL marker + filtered_versions = [] + for minor_key, minor_group in minor_versions.items(): + # Check if any version in this minor series is marked as EOL + has_eol = any(version.is_eol for version, _, _ in minor_group) + + if has_eol: + console.print(f"[yellow]Skipping minor version {minor_key}.* due to EOL[/yellow]") + else: + filtered_versions.extend(minor_group) + + # Sort again after filtering + filtered_versions.sort(key=lambda x: x[0].sort_key, reverse=True) + + console.print(f"[dim]Kept {len(filtered_versions)} versions after EOL filtering[/dim]") + return filtered_versions + + def filter_actual_versions(self, versions: List[Tuple[RedisVersion, str, str]]) -> List[Tuple[RedisVersion, str, str]]: + """Filter to keep only the latest patch version for each minor version and milestone status. 
+ + Args: + versions: List of (RedisVersion, commit, tag_ref) tuples (should be sorted newest first) + + Returns: + Filtered list with only the latest versions for each minor/milestone combination + """ + console.print("[blue]Filtering to actual versions (latest patch per minor/milestone)[/blue]") + + patch_versions = OrderedDict() + + for version, commit, tag_ref in versions: + patch_key = (version.major, version.minor, version.patch) + if patch_key not in patch_versions: + patch_versions[patch_key] = (version, commit, tag_ref) + elif patch_versions[patch_key][0].is_milestone and not version.is_milestone: + # GA always takes precedence over milestone for the same major.minor.patch + patch_versions[patch_key] = (version, commit, tag_ref) + + filtered_versions = [] + mainlines_with_ga = set() + + for version, commit, tag_ref in patch_versions.values(): + if version.mainline_version not in mainlines_with_ga: + if not version.is_milestone: + mainlines_with_ga.add(version.mainline_version) + filtered_versions.append((version, commit, tag_ref)) + return filtered_versions + + def get_actual_major_redis_versions(self, major_version: int) -> List[Tuple[RedisVersion, str, str]]: + """Get the actual Redis versions to process for a major version. + + This is the main entry point that combines all filtering steps: + 1. Get versions from git tags + 2. Filter out EOL versions + 3. Filter to actual versions (latest patch per minor/milestone) + + Args: + major_version: Major version to process + + Returns: + List of (RedisVersion, commit, tag_ref) tuples for processing + """ + console.print(f"[bold blue]Processing Redis {major_version}.x versions[/bold blue]") + + # Get all versions from tags + versions = self.get_redis_versions_from_tags(major_version) + + if not versions: + console.print(f"[red]No versions found for major version {major_version}[/red]") + return [] + + # Apply filters + versions = self.filter_eol_versions(versions) + versions = self.filter_actual_versions(versions) + + console.print(f"[green]Final selection: {len(versions)} versions to process[/green]") + for version, commit, tag_ref in versions: + console.print(f"[green] [bold yellow]{version}[/bold yellow] - {commit[:8]}[/green]") + + return versions diff --git a/release-automation/tests/__init__.py b/release-automation/tests/__init__.py new file mode 100644 index 000000000..a60351fa0 --- /dev/null +++ b/release-automation/tests/__init__.py @@ -0,0 +1 @@ +"""Test package for stackbrew library generator.""" diff --git a/release-automation/tests/test_integration.py b/release-automation/tests/test_integration.py new file mode 100644 index 000000000..2a11548a1 --- /dev/null +++ b/release-automation/tests/test_integration.py @@ -0,0 +1,57 @@ +"""Integration tests for the stackbrew generator.""" + +import pytest +from unittest.mock import Mock, patch + +from stackbrew_generator.cli import app +from stackbrew_generator.models import RedisVersion, Distribution, DistroType +from typer.testing import CliRunner + + +class TestIntegration: + """Integration tests for the complete workflow.""" + + def setup_method(self): + """Set up test fixtures.""" + self.runner = CliRunner() + + def test_version_command(self): + """Test version command.""" + result = self.runner.invoke(app, ["version"]) + assert result.exit_code == 0 + assert "stackbrew-library-generator" in result.stderr + + def test_invalid_major_version(self): + """Test handling of invalid major version.""" + result = self.runner.invoke(app, ["generate-stackbrew-content", "0"]) + assert 
result.exit_code != 0
+
+    @patch('stackbrew_generator.git_operations.GitClient')
+    def test_no_tags_found(self, mock_git_client_class):
+        """Test handling when no tags are found."""
+        # Mock git client to return no tags
+        mock_git_client = Mock()
+        mock_git_client_class.return_value = mock_git_client
+        mock_git_client.list_remote_tags.return_value = []
+
+        result = self.runner.invoke(app, ["generate-stackbrew-content", "99"])
+        assert result.exit_code == 1
+        assert "No tags found" in result.stderr
+
+    @patch('stackbrew_generator.version_filter.VersionFilter.get_actual_major_redis_versions')
+    def test_no_versions_found(self, mock_get_versions):
+        """Test handling when no versions are found."""
+        # Mock the version filter to return no versions
+        mock_get_versions.return_value = []
+
+        result = self.runner.invoke(app, ["generate-stackbrew-content", "8"])
+        assert "No versions found" in result.stderr
+
+    def test_help_output(self):
+        """Test help output."""
+        result = self.runner.invoke(app, ["generate-stackbrew-content", "--help"])
+        assert result.exit_code == 0
+        assert "Generate stackbrew library content" in result.stdout
+        assert "--remote" in result.stdout
+        assert "--verbose" in result.stdout
diff --git a/release-automation/tests/test_models.py b/release-automation/tests/test_models.py
new file mode 100644
index 000000000..0feef1f94
--- /dev/null
+++ b/release-automation/tests/test_models.py
@@ -0,0 +1,239 @@
+"""Tests for data models."""
+
+import pytest
+
+from stackbrew_generator.models import RedisVersion, Distribution, DistroType, Release, StackbrewEntry
+
+
+class TestRedisVersion:
+    """Tests for RedisVersion model."""
+
+    def test_parse_basic_version(self):
+        """Test parsing basic version strings."""
+        version = RedisVersion.parse("8.2.1")
+        assert version.major == 8
+        assert version.minor == 2
+        assert version.patch == 1
+        assert version.suffix == ""
+
+    def test_parse_version_with_v_prefix(self):
+        """Test parsing version with 'v' prefix."""
+        version = RedisVersion.parse("v8.2.1")
+        assert version.major == 8
+        assert version.minor == 2
+        assert version.patch == 1
+        assert version.suffix == ""
+
+    def test_parse_version_with_suffix(self):
+        """Test parsing version with suffix."""
+        version = RedisVersion.parse("8.2.1-m01")
+        assert version.major == 8
+        assert version.minor == 2
+        assert version.patch == 1
+        assert version.suffix == "-m01"
+
+    def test_parse_version_without_patch(self):
+        """Test parsing version without patch number."""
+        version = RedisVersion.parse("8.2")
+        assert version.major == 8
+        assert version.minor == 2
+        assert version.patch is None
+        assert version.suffix == ""
+
+    def test_parse_eol_version(self):
+        """Test parsing EOL version."""
+        version = RedisVersion.parse("7.4.0-eol")
+        assert version.major == 7
+        assert version.minor == 4
+        assert version.patch == 0
+        assert version.suffix == "-eol"
+        assert version.is_eol is True
+
+    def test_parse_invalid_version(self):
+        """Test parsing invalid version strings."""
+        with pytest.raises(ValueError):
+            RedisVersion.parse("invalid")
+
+        with pytest.raises(ValueError):
+            RedisVersion.parse("0.1.0")  # Major version must be >= 1
+
+    def test_is_milestone(self):
+        """Test milestone detection."""
+        ga_version = RedisVersion.parse("8.2.1")
+        milestone_version = RedisVersion.parse("8.2.1-m01")
+
+        assert ga_version.is_milestone is False
+        assert milestone_version.is_milestone is True
+
+    def test_mainline_version(self):
+        """Test mainline version property."""
+        version =
RedisVersion.parse("8.2.1-m01") + assert version.mainline_version == "8.2" + + def test_string_representation(self): + """Test string representation.""" + version1 = RedisVersion.parse("8.2.1") + version2 = RedisVersion.parse("8.2.1-m01") + version3 = RedisVersion.parse("8.2") + + assert str(version1) == "8.2.1" + assert str(version2) == "8.2.1-m01" + assert str(version3) == "8.2" + + def test_version_comparison(self): + """Test version comparison for sorting.""" + v1 = RedisVersion.parse("8.2.1") + v2 = RedisVersion.parse("8.2.2") + v3 = RedisVersion.parse("8.2.1-m01") + v4 = RedisVersion.parse("8.3.0") + + # Test numeric comparison + assert v1 < v2 + assert v2 < v4 + + # Test milestone vs GA (GA comes after milestone) + assert v3 < v1 + + # Test sorting + versions = [v4, v1, v3, v2] + sorted_versions = sorted(versions) + assert sorted_versions == [v3, v1, v2, v4] + + +class TestDistribution: + """Tests for Distribution model.""" + + def test_from_dockerfile_alpine(self): + """Test parsing Alpine distribution from Dockerfile.""" + distro = Distribution.from_dockerfile_line("FROM alpine:3.22") + assert distro.type == DistroType.ALPINE + assert distro.name == "alpine3.22" + + def test_from_dockerfile_debian(self): + """Test parsing Debian distribution from Dockerfile.""" + distro = Distribution.from_dockerfile_line("FROM debian:bookworm") + assert distro.type == DistroType.DEBIAN + assert distro.name == "bookworm" + + def test_from_dockerfile_debian_slim(self): + """Test parsing Debian slim distribution from Dockerfile.""" + distro = Distribution.from_dockerfile_line("FROM debian:bookworm-slim") + assert distro.type == DistroType.DEBIAN + assert distro.name == "bookworm" + + def test_from_dockerfile_invalid(self): + """Test parsing invalid Dockerfile lines.""" + with pytest.raises(ValueError): + Distribution.from_dockerfile_line("INVALID LINE") + + with pytest.raises(ValueError): + Distribution.from_dockerfile_line("FROM unsupported:latest") + + def test_is_default(self): + """Test default distribution detection.""" + alpine = Distribution(type=DistroType.ALPINE, name="alpine3.22") + debian = Distribution(type=DistroType.DEBIAN, name="bookworm") + + assert alpine.is_default is False + assert debian.is_default is True + + def test_tag_names(self): + """Test tag name generation.""" + alpine = Distribution(type=DistroType.ALPINE, name="alpine3.22") + debian = Distribution(type=DistroType.DEBIAN, name="bookworm") + + assert alpine.tag_names == ["alpine", "alpine3.22"] + assert debian.tag_names == ["bookworm"] + + +class TestRelease: + """Tests for Release model.""" + + def test_release_creation(self): + """Test creating a Release instance.""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + + release = Release( + commit="abc123def456", + version=version, + distribution=distribution, + git_fetch_ref="refs/tags/v8.2.1" + ) + + assert release.commit == "abc123def456" + assert release.version == version + assert release.distribution == distribution + + def test_release_string_representation(self): + """Test Release string representation.""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + + release = Release( + commit="abc123def456", + version=version, + distribution=distribution, + git_fetch_ref="refs/tags/v8.2.1" + ) + + expected = "abc123de 8.2.1 debian bookworm" + assert str(release) == expected + + +class TestStackbrewEntry: + """Tests for StackbrewEntry model.""" + + 
def test_debian_architectures(self): + """Test that Debian distributions get the correct architectures.""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + + entry = StackbrewEntry( + tags=["8.2.1", "latest"], + commit="abc123def456", + version=version, + distribution=distribution, + git_fetch_ref="refs/tags/v8.2.1" + ) + + expected_architectures = ["amd64", "arm32v5", "arm32v7", "arm64v8", "i386", "mips64le", "ppc64le", "s390x"] + assert entry.architectures == expected_architectures + + def test_alpine_architectures(self): + """Test that Alpine distributions get the correct architectures.""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.ALPINE, name="alpine3.22") + + entry = StackbrewEntry( + tags=["8.2.1-alpine", "alpine"], + commit="abc123def456", + version=version, + distribution=distribution, + git_fetch_ref="refs/tags/v8.2.1" + ) + + expected_architectures = ["amd64", "arm32v6", "arm32v7", "arm64v8", "i386", "ppc64le", "riscv64", "s390x"] + assert entry.architectures == expected_architectures + + def test_stackbrew_entry_string_format(self): + """Test that StackbrewEntry formats correctly with architectures.""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.ALPINE, name="alpine3.22") + + entry = StackbrewEntry( + tags=["8.2.1-alpine", "alpine"], + commit="abc123def456", + version=version, + distribution=distribution, + git_fetch_ref="refs/tags/v8.2.1" + ) + + output = str(entry) + + # Check that it contains the expected Alpine architectures + assert "amd64, arm32v6, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x" in output + assert "Tags: 8.2.1-alpine, alpine" in output + assert "GitCommit: abc123def456" in output + assert "GitFetch: refs/tags/v8.2.1" in output + assert "Directory: alpine" in output diff --git a/release-automation/tests/test_stackbrew.py b/release-automation/tests/test_stackbrew.py new file mode 100644 index 000000000..e0864a41f --- /dev/null +++ b/release-automation/tests/test_stackbrew.py @@ -0,0 +1,203 @@ +"""Tests for stackbrew library generation.""" + +from stackbrew_generator.models import RedisVersion, Distribution, DistroType, Release +from stackbrew_generator.stackbrew import StackbrewGenerator + + +class TestStackbrewGenerator: + """Tests for StackbrewGenerator.""" + + def setup_method(self): + """Set up test fixtures.""" + self.generator = StackbrewGenerator() + + def test_generate_tags_debian_ga_latest(self): + """Test tag generation for Debian GA version (latest).""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + release = Release(commit="abc123", version=version, distribution=distribution, git_fetch_ref="refs/tags/v8.2.1") + + tags = self.generator.generate_tags_for_release(release, is_latest=True) + + expected_tags = [ + "8.2.1", # Full version + "8.2", # Mainline version (GA only) + "8", # Major version (latest only) + "8.2.1-bookworm", # Version with distro + "8.2-bookworm", # Mainline with distro + "8-bookworm", # Major with distro + "latest", # Latest tag (default distro only) + "bookworm" # Bare distro name (latest only) + ] + + assert set(tags) == set(expected_tags) + + def test_generate_tags_debian_ga_not_latest(self): + """Test tag generation for Debian GA version (not latest).""" + version = RedisVersion.parse("7.4.1") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + release = Release(commit="abc123", 
version=version, distribution=distribution, git_fetch_ref="refs/tags/v7.4.1") + + tags = self.generator.generate_tags_for_release(release, is_latest=False) + + expected_tags = [ + "7.4.1", # Full version + "7.4", # Mainline version (GA only) + "7.4.1-bookworm", # Version with distro + "7.4-bookworm" # Mainline with distro + ] + + assert set(tags) == set(expected_tags) + + def test_generate_tags_alpine_ga_latest(self): + """Test tag generation for Alpine GA version (latest).""" + version = RedisVersion.parse("8.2.1") + distribution = Distribution(type=DistroType.ALPINE, name="alpine3.22") + release = Release(commit="abc123", version=version, distribution=distribution, git_fetch_ref="refs/tags/v8.2.1") + + tags = self.generator.generate_tags_for_release(release, is_latest=True) + + expected_tags = [ + "8.2.1-alpine", # Version with distro type + "8.2.1-alpine3.22", # Version with full distro name + "8.2-alpine", # Mainline with distro type + "8.2-alpine3.22", # Mainline with full distro name + "8-alpine", # Major with distro type + "8-alpine3.22", # Major with full distro name + "alpine", # Bare distro type (latest only) + "alpine3.22" # Bare distro name (latest only) + ] + + assert set(tags) == set(expected_tags) + + def test_generate_tags_milestone_version(self): + """Test tag generation for milestone version.""" + version = RedisVersion.parse("8.2.1-m01") + distribution = Distribution(type=DistroType.DEBIAN, name="bookworm") + release = Release(commit="abc123", version=version, distribution=distribution, git_fetch_ref="refs/tags/v8.2.1-m01") + + tags = self.generator.generate_tags_for_release(release, is_latest=False) + + # Milestone versions should not get mainline version tags or major version tags + expected_tags = [ + "8.2.1-m01", # Full version only + "8.2.1-m01-bookworm", # Version with distro + ] + + assert set(tags) == set(expected_tags) + + + + def test_generate_stackbrew_library(self): + """Test complete stackbrew library generation.""" + releases = [ + Release( + commit="abc123", + version=RedisVersion.parse("8.2.1"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.2.1" + ), + Release( + commit="abc123", + version=RedisVersion.parse("8.2.1"), + distribution=Distribution(type=DistroType.ALPINE, name="alpine3.22"), + git_fetch_ref="refs/tags/v8.2.1" + ), + Release( + commit="def456", + version=RedisVersion.parse("8.1.5"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.1.5" + ) + ] + + entries = self.generator.generate_stackbrew_library(releases) + + assert len(entries) == 3 + + # Check that the 8.2.1 versions are marked as latest + debian_8_2_1 = next(e for e in entries if e.version.patch == 1 and e.distribution.type == DistroType.DEBIAN) + assert "latest" in debian_8_2_1.tags + assert "8" in debian_8_2_1.tags + + # Check that 8.1.5 is not marked as latest + debian_8_1_5 = next(e for e in entries if e.version.minor == 1) + assert "latest" not in debian_8_1_5.tags + assert "8" not in debian_8_1_5.tags + + def test_format_stackbrew_output(self): + """Test stackbrew output formatting.""" + entries = [ + Release( + commit="abc123", + version=RedisVersion.parse("8.2.1"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.2.1" + ) + ] + + stackbrew_entries = self.generator.generate_stackbrew_library(entries) + output = self.generator.format_stackbrew_output(stackbrew_entries) + + assert isinstance(output, str) + assert len(output) > 0 
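+
+        # For reference, a rendered entry follows the stackbrew block format
+        # exercised elsewhere in these tests (illustrative):
+        #
+        #   Tags: 8.2.1, 8.2, 8, 8.2.1-bookworm, 8.2-bookworm, 8-bookworm, latest, bookworm
+        #   Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x
+        #   GitCommit: abc123
+        #   GitFetch: refs/tags/v8.2.1
+        #   Directory: debian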
+ # Should contain comma-separated tags + assert "," in output + + def test_generate_stackbrew_library_with_head_milestone(self): + """Test stackbrew generation with milestone at head (matches bash test).""" + # This matches the bash test case: test_generate_stackbrew_library_with_head_milestone + releases = [ + Release( + commit="8d4437bdd0443189f9b3ba5943fdf793f821e8e2", + version=RedisVersion.parse("8.2.2-m01-int1"), + distribution=Distribution.from_dockerfile_line("FROM debian:bookworm"), + git_fetch_ref="refs/tags/v8.2.2-m01-int1" + ), + Release( + commit="8d4437bdd0443189f9b3ba5943fdf793f821e8e2", + version=RedisVersion.parse("8.2.2-m01-int1"), + distribution=Distribution.from_dockerfile_line("FROM alpine:3.22"), + git_fetch_ref="refs/tags/v8.2.2-m01-int1" + ), + Release( + commit="a13b78815d980881e57f15b9cf13cd2f26f3fab6", + version=RedisVersion.parse("8.2.1"), + distribution=Distribution.from_dockerfile_line("FROM debian:bookworm"), + git_fetch_ref="refs/tags/v8.2.1" + ), + Release( + commit="a13b78815d980881e57f15b9cf13cd2f26f3fab6", + version=RedisVersion.parse("8.2.1"), + distribution=Distribution.from_dockerfile_line("FROM alpine:3.22"), + git_fetch_ref="refs/tags/v8.2.1" + ), + Release( + commit="101262a8cf05b98137d88bc17e77db90c24cc783", + version=RedisVersion.parse("8.0.3"), + distribution=Distribution.from_dockerfile_line("FROM debian:bookworm"), + git_fetch_ref="refs/tags/v8.0.3" + ), + Release( + commit="101262a8cf05b98137d88bc17e77db90c24cc783", + version=RedisVersion.parse("8.0.3"), + distribution=Distribution.from_dockerfile_line("FROM alpine:3.21"), + git_fetch_ref="refs/tags/v8.0.3" + ) + ] + + entries = self.generator.generate_stackbrew_library(releases) + + # Expected tags based on bash test + expected_tags = [ + ["8.2.2-m01-int1", "8.2.2-m01-int1-bookworm"], # milestone - no major/mainline tags + ["8.2.2-m01-int1-alpine", "8.2.2-m01-int1-alpine3.22"], # milestone - no major/mainline tags + ["8.2.1", "8.2", "8", "8.2.1-bookworm", "8.2-bookworm", "8-bookworm", "latest", "bookworm"], # GA - gets all tags + ["8.2.1-alpine", "8.2-alpine", "8-alpine", "8.2.1-alpine3.22", "8.2-alpine3.22", "8-alpine3.22", "alpine", "alpine3.22"], # GA - gets all tags + ["8.0.3", "8.0", "8.0.3-bookworm", "8.0-bookworm"], # different minor - no major tags + ["8.0.3-alpine", "8.0-alpine", "8.0.3-alpine3.21", "8.0-alpine3.21"] # different minor - no major tags + ] + + assert len(entries) == 6 + for i, entry in enumerate(entries): + assert set(entry.tags) == set(expected_tags[i]), f"Tags mismatch for entry {i}: {entry.tags} != {expected_tags[i]}" diff --git a/release-automation/tests/test_stackbrew_updater.py b/release-automation/tests/test_stackbrew_updater.py new file mode 100644 index 000000000..36b98590e --- /dev/null +++ b/release-automation/tests/test_stackbrew_updater.py @@ -0,0 +1,123 @@ +"""Tests for StackbrewUpdater class.""" + +import tempfile +from pathlib import Path + +from stackbrew_generator.stackbrew import StackbrewUpdater + +class TestStackbrewUpdater: + """Tests for StackbrewUpdater class.""" + + def setup_method(self): + """Set up test fixtures.""" + self.updater = StackbrewUpdater() + + def test_update_stackbrew_content_basic(self): + """Test basic stackbrew content update functionality.""" + # Create a sample stackbrew file + sample_content = """# This file was generated via https://github.com/redis/docker-library-redis/blob/abc123/generate-stackbrew-library.sh + +Maintainers: David Maier (@dmaier-redislabs), + Yossi Gottlieb (@yossigo) +GitRepo: 
https://github.com/redis/docker-library-redis.git + +Tags: 8.2.1, 8.2, 8, 8.2.1-bookworm, 8.2-bookworm, 8-bookworm, latest, bookworm +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: old123commit +GitFetch: refs/tags/v8.2.1 +Directory: debian + +Tags: 8.2.1-alpine, 8.2-alpine, 8-alpine, 8.2.1-alpine3.22, 8.2-alpine3.22, 8-alpine3.22, alpine, alpine3.22 +Architectures: amd64, arm32v6, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x +GitCommit: old123commit +GitFetch: refs/tags/v8.2.1 +Directory: alpine + +Tags: 7.4.0, 7.4, 7, 7.4.0-bookworm, 7.4-bookworm, 7-bookworm +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: old456commit +GitFetch: refs/tags/v7.4.0 +Directory: debian +""" + + new_content = """Tags: 8.2.2, 8.2, 8, 8.2.2-bookworm, 8.2-bookworm, 8-bookworm, latest, bookworm +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: new123commit +GitFetch: refs/tags/v8.2.2 +Directory: debian + +Tags: 8.2.2-alpine, 8.2-alpine, 8-alpine, 8.2.2-alpine3.22, 8.2-alpine3.22, 8-alpine3.22, alpine, alpine3.22 +Architectures: amd64, arm32v6, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x +GitCommit: new123commit +GitFetch: refs/tags/v8.2.2 +Directory: alpine""" + + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: + f.write(sample_content) + input_file = Path(f.name) + + try: + # Update the content + updated_content = self.updater.update_stackbrew_content( + input_file, 8, new_content, verbose=False + ) + + # Should still have the header + assert "Maintainers: David Maier" in updated_content + assert "GitRepo: https://github.com/redis/docker-library-redis.git" in updated_content + + # Should have new Redis 8.x content + assert "new123commit" in updated_content + assert "8.2.2" in updated_content + + # Should still have Redis 7.x content (unchanged) + assert "7.4.0" in updated_content + assert "old456commit" in updated_content + + # Should not have old Redis 8.x content + assert "old123commit" not in updated_content + assert "8.2.1" not in updated_content + + finally: + input_file.unlink() + + def test_parse_stackbrew_entries(self): + """Test parsing stackbrew entries.""" + lines = [ + "Tags: 8.2.1, 8.2, 8", + "Architectures: amd64, arm64v8", + "GitCommit: abc123", + "Directory: debian", + "", + "Tags: 8.2.1-alpine, 8.2-alpine", + "Architectures: amd64, arm64v8", + "GitCommit: abc123", + "Directory: alpine" + ] + + entries = self.updater._parse_stackbrew_entries(lines) + + assert len(entries) == 2 + assert entries[0][0] == "Tags: 8.2.1, 8.2, 8" + assert entries[1][0] == "Tags: 8.2.1-alpine, 8.2-alpine" + + def test_entry_belongs_to_major_version(self): + """Test checking if entry belongs to major version.""" + entry_8x = [ + "Tags: 8.2.1, 8.2, 8, latest", + "Architectures: amd64", + "GitCommit: abc123", + "Directory: debian" + ] + + entry_7x = [ + "Tags: 7.4.0, 7.4, 7", + "Architectures: amd64", + "GitCommit: def456", + "Directory: debian" + ] + + assert self.updater._entry_belongs_to_major_version(entry_8x, 8) is True + assert self.updater._entry_belongs_to_major_version(entry_8x, 7) is False + assert self.updater._entry_belongs_to_major_version(entry_7x, 7) is True + assert self.updater._entry_belongs_to_major_version(entry_7x, 8) is False diff --git a/release-automation/tests/test_update_stackbrew_file.py b/release-automation/tests/test_update_stackbrew_file.py new file mode 100644 index 000000000..acdbd3ac1 --- /dev/null +++ 
b/release-automation/tests/test_update_stackbrew_file.py @@ -0,0 +1,297 @@ +"""Tests for update-stackbrew-file command.""" + +import tempfile +from pathlib import Path +from unittest.mock import Mock, patch + +import pytest +from typer.testing import CliRunner + +from stackbrew_generator.cli import app +from stackbrew_generator.models import RedisVersion, Distribution, DistroType, Release + + +class TestUpdateStackbrewFile: + """Tests for update-stackbrew-file command.""" + + def setup_method(self): + """Set up test fixtures.""" + self.runner = CliRunner() + + def test_update_stackbrew_file_basic(self): + """Test basic stackbrew file update functionality.""" + # Create a sample stackbrew file + sample_content = """# This file was generated via https://github.com/redis/docker-library-redis/blob/abc123/generate-stackbrew-library.sh + +Maintainers: David Maier (@dmaier-redislabs), + Yossi Gottlieb (@yossigo) +GitRepo: https://github.com/redis/docker-library-redis.git + +Tags: 8.2.1, 8.2, 8, 8.2.1-bookworm, 8.2-bookworm, 8-bookworm, latest, bookworm +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: old123commit +GitFetch: refs/tags/v8.2.1 +Directory: debian + +Tags: 8.2.1-alpine, 8.2-alpine, 8-alpine, 8.2.1-alpine3.22, 8.2-alpine3.22, 8-alpine3.22, alpine, alpine3.22 +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: old123commit +GitFetch: refs/tags/v8.2.1 +Directory: alpine + +Tags: 7.4.0, 7.4, 7, 7.4.0-bookworm, 7.4-bookworm, 7-bookworm +Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x +GitCommit: old456commit +GitFetch: refs/tags/v7.4.0 +Directory: debian +""" + + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: + f.write(sample_content) + input_file = Path(f.name) + + try: + with patch('stackbrew_generator.cli.DistributionDetector') as mock_detector_class, \ + patch('stackbrew_generator.cli.GitClient') as mock_git_client_class, \ + patch('stackbrew_generator.cli.VersionFilter') as mock_version_filter_class: + + # Mock git client + mock_git_client = Mock() + mock_git_client_class.return_value = mock_git_client + + # Mock distribution detector + mock_distribution_detector = Mock() + mock_detector_class.return_value = mock_distribution_detector + + # Mock version filter + mock_version_filter = Mock() + mock_version_filter_class.return_value = mock_version_filter + + # Mock the version filter to return Redis 8.x versions + mock_version_filter.get_actual_major_redis_versions.return_value = [ + (RedisVersion.parse("8.2.2"), "new123commit", "refs/tags/v8.2.2") + ] + + # Mock releases + mock_releases = [ + Release( + commit="new123commit", + version=RedisVersion.parse("8.2.2"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.2.2" + ) + ] + mock_distribution_detector.prepare_releases_list.return_value = mock_releases + + # Run the command with output to file + result = self.runner.invoke(app, [ + "update-stackbrew-file", + "8", + "--input", str(input_file), + "--output", str(input_file), + "--verbose" + ]) + + assert result.exit_code == 0 + + # Check that the file was updated + updated_content = input_file.read_text() + + # Should still have the header + assert "Maintainers: David Maier" in updated_content + assert "GitRepo: https://github.com/redis/docker-library-redis.git" in updated_content + + # Should have new Redis 8.x content + assert "new123commit" in updated_content + assert "8.2.2" in 
updated_content + + # Should still have Redis 7.x content (unchanged) + assert "7.4.0" in updated_content + assert "old456commit" in updated_content + + # Should not have old Redis 8.x content + assert "old123commit" not in updated_content + assert "8.2.1" not in updated_content + + finally: + input_file.unlink() + + def test_update_stackbrew_file_nonexistent_input(self): + """Test error handling for nonexistent input file.""" + result = self.runner.invoke(app, [ + "update-stackbrew-file", + "8", + "--input", "/nonexistent/file.txt" + ]) + + assert result.exit_code == 1 + assert "Input file does not exist" in result.stderr + + def test_update_stackbrew_file_no_versions_found(self): + """Test error handling when no versions are found.""" + sample_content = """# Header +Maintainers: Test +GitRepo: https://example.com +""" + + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: + f.write(sample_content) + input_file = Path(f.name) + + try: + with patch('stackbrew_generator.cli.GitClient') as mock_git_client_class, \ + patch('stackbrew_generator.cli.VersionFilter') as mock_version_filter_class: + + mock_git_client = Mock() + mock_git_client_class.return_value = mock_git_client + + mock_version_filter = Mock() + mock_version_filter_class.return_value = mock_version_filter + mock_version_filter.get_actual_major_redis_versions.return_value = [] + + result = self.runner.invoke(app, [ + "update-stackbrew-file", + "9", + "--input", str(input_file) + ]) + + assert result.exit_code == 1 + assert "No versions found for Redis 9.x" in result.stderr + + finally: + input_file.unlink() + + def test_update_stackbrew_file_with_output_option(self): + """Test using separate output file.""" + sample_content = """# Header +Maintainers: Test +GitRepo: https://example.com + +Tags: 8.1.0, 8.1, 8.1.0-bookworm, 8.1-bookworm +Architectures: amd64 +GitCommit: old123 +GitFetch: refs/tags/v8.1.0 +Directory: debian +""" + + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as input_f, \ + tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as output_f: + + input_f.write(sample_content) + input_file = Path(input_f.name) + output_file = Path(output_f.name) + + try: + with patch('stackbrew_generator.cli.DistributionDetector') as mock_detector_class, \ + patch('stackbrew_generator.cli.GitClient') as mock_git_client_class, \ + patch('stackbrew_generator.cli.VersionFilter') as mock_version_filter_class: + + # Setup mocks + mock_git_client = Mock() + mock_git_client_class.return_value = mock_git_client + + mock_distribution_detector = Mock() + mock_detector_class.return_value = mock_distribution_detector + + mock_version_filter = Mock() + mock_version_filter_class.return_value = mock_version_filter + mock_version_filter.get_actual_major_redis_versions.return_value = [ + (RedisVersion.parse("8.2.0"), "new456commit", "refs/tags/v8.2.0") + ] + + mock_releases = [ + Release( + commit="new456commit", + version=RedisVersion.parse("8.2.0"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.2.0" + ) + ] + mock_distribution_detector.prepare_releases_list.return_value = mock_releases + + result = self.runner.invoke(app, [ + "update-stackbrew-file", + "8", + "--input", str(input_file), + "--output", str(output_file) + ]) + + assert result.exit_code == 0 + + # Original file should be unchanged + original_content = input_file.read_text() + assert "old123" in original_content + + # Output file should have updated content + 
updated_content = output_file.read_text() + assert "new456commit" in updated_content + assert "8.2.0" in updated_content + assert "old123" not in updated_content + + finally: + input_file.unlink() + output_file.unlink() + + def test_update_stackbrew_file_stdout_output(self): + """Test outputting to stdout when no output file is specified.""" + sample_content = """# Header +Maintainers: Test +GitRepo: https://example.com + +Tags: 8.1.0, 8.1, 8.1.0-bookworm, 8.1-bookworm +Architectures: amd64 +GitCommit: old123 +GitFetch: refs/tags/v8.1.0 +Directory: debian +""" + + with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: + f.write(sample_content) + input_file = Path(f.name) + + try: + with patch('stackbrew_generator.cli.DistributionDetector') as mock_detector_class, \ + patch('stackbrew_generator.cli.GitClient') as mock_git_client_class, \ + patch('stackbrew_generator.cli.VersionFilter') as mock_version_filter_class: + + # Setup mocks + mock_git_client = Mock() + mock_git_client_class.return_value = mock_git_client + + mock_distribution_detector = Mock() + mock_detector_class.return_value = mock_distribution_detector + + mock_version_filter = Mock() + mock_version_filter_class.return_value = mock_version_filter + mock_version_filter.get_actual_major_redis_versions.return_value = [ + (RedisVersion.parse("8.2.0"), "new789commit", "refs/tags/v8.2.0") + ] + + mock_releases = [ + Release( + commit="new789commit", + version=RedisVersion.parse("8.2.0"), + distribution=Distribution(type=DistroType.DEBIAN, name="bookworm"), + git_fetch_ref="refs/tags/v8.2.0" + ) + ] + mock_distribution_detector.prepare_releases_list.return_value = mock_releases + + result = self.runner.invoke(app, [ + "update-stackbrew-file", + "8", + "--input", str(input_file) + ]) + + assert result.exit_code == 0 + + # Should output to stdout + assert "new789commit" in result.stdout + assert "8.2.0" in result.stdout + + # Original file should be unchanged + original_content = input_file.read_text() + assert "old123" in original_content + + finally: + input_file.unlink() diff --git a/release-automation/tests/test_version_filter.py b/release-automation/tests/test_version_filter.py new file mode 100644 index 000000000..7ebeb556d --- /dev/null +++ b/release-automation/tests/test_version_filter.py @@ -0,0 +1,324 @@ +"""Tests for VersionFilter class.""" + +import pytest +from unittest.mock import Mock, patch + +from stackbrew_generator.models import RedisVersion +from stackbrew_generator.version_filter import VersionFilter +from stackbrew_generator.git_operations import GitClient +from stackbrew_generator.exceptions import GitOperationError + + +class MockGitClient: + """Mock GitClient for testing.""" + + def __init__(self): + """Initialize mock git client.""" + self.remote_tags = [] + self.version_extraction_results = {} + + def set_remote_tags(self, tags): + """Set mock remote tags. + + Args: + tags: List of (commit, tag_ref) tuples + """ + self.remote_tags = tags + + def set_version_extraction_result(self, tag_ref, version_or_exception): + """Set mock version extraction result. 
+ + Args: + tag_ref: Tag reference + version_or_exception: RedisVersion instance or Exception to raise + """ + self.version_extraction_results[tag_ref] = version_or_exception + + def list_remote_tags(self, major_version): + """Mock list_remote_tags method.""" + return self.remote_tags + + def extract_version_from_tag(self, tag_ref, major_version): + """Mock extract_version_from_tag method.""" + if tag_ref in self.version_extraction_results: + result = self.version_extraction_results[tag_ref] + if isinstance(result, Exception): + raise result + return result + # Default behavior - try to parse from tag_ref + return RedisVersion.parse(tag_ref.replace('refs/tags/', '')) + + +def create_version_tuples(version_strings): + """Helper to create version tuples from version strings. + + Args: + version_strings: List of version strings + + Returns: + List of (RedisVersion, commit, tag_ref) tuples + """ + tuples = [] + for i, version_str in enumerate(version_strings): + version = RedisVersion.parse(version_str) + commit = f"commit{i:03d}" + tag_ref = f"refs/tags/{version_str}" + tuples.append((version, commit, tag_ref)) + + tuples.sort(key=lambda x: x[0].sort_key, reverse=True) + return tuples + + +class TestVersionFilter: + """Tests for VersionFilter class.""" + + def test_init(self): + """Test VersionFilter initialization.""" + git_client = GitClient() + version_filter = VersionFilter(git_client) + assert version_filter.git_client is git_client + + def test_get_redis_versions_from_tags_success(self): + """Test successful version retrieval from tags.""" + mock_git_client = MockGitClient() + mock_git_client.set_remote_tags([ + ("commit001", "refs/tags/v8.2.1"), + ("commit002", "refs/tags/v8.2.0"), + ("commit003", "refs/tags/v8.1.0"), + ]) + + version_filter = VersionFilter(mock_git_client) + result = version_filter.get_redis_versions_from_tags(8) + + # Should be sorted by version (newest first) + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1", "8.2.0", "8.1.0"] + assert version_strings == expected_versions + + # Check commits and tag refs + commits = [v[1] for v in result] + tag_refs = [v[2] for v in result] + expected_commits = ["commit001", "commit002", "commit003"] + expected_tag_refs = ["refs/tags/v8.2.1", "refs/tags/v8.2.0", "refs/tags/v8.1.0"] + assert commits == expected_commits + assert tag_refs == expected_tag_refs + + def test_get_redis_versions_from_tags_with_invalid_tags(self): + """Test version retrieval with some invalid tags.""" + mock_git_client = MockGitClient() + mock_git_client.set_remote_tags([ + ("commit001", "refs/tags/v8.2.1"), + ("commit002", "refs/tags/invalid-tag"), + ("commit003", "refs/tags/v8.1.0"), + ]) + + # Set up invalid tag to raise exception + mock_git_client.set_version_extraction_result( + "refs/tags/invalid-tag", + ValueError("Invalid version format") + ) + + version_filter = VersionFilter(mock_git_client) + result = version_filter.get_redis_versions_from_tags(8) + + # Should skip invalid tag and return only valid ones + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1", "8.1.0"] + assert version_strings == expected_versions + + def test_get_redis_versions_from_tags_empty(self): + """Test version retrieval with no tags.""" + mock_git_client = MockGitClient() + mock_git_client.set_remote_tags([]) + + version_filter = VersionFilter(mock_git_client) + result = version_filter.get_redis_versions_from_tags(8) + + assert result == [] + + def test_filter_eol_versions_basic(self): + """Test basic EOL version 
filtering.""" + version_filter = VersionFilter(MockGitClient()) + + # Create test versions with one EOL minor version + versions = create_version_tuples([ + "v8.2.1", + "v8.2.0", + "v8.1.0-eol", + "v8.1.0-zoo1", + "v8.1.2", + "v8.0.1", + "v8.0.0" + ]) + + result = version_filter.filter_eol_versions(versions) + + # Should filter out all 8.1.* versions (because 8.1.0-eol exists) + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1", "8.2.0", "8.0.1", "8.0.0"] + assert version_strings == expected_versions + + def test_filter_eol_versions_empty(self): + """Test EOL filtering with empty input.""" + version_filter = VersionFilter(MockGitClient()) + result = version_filter.filter_eol_versions([]) + assert result == [] + + def test_filter_actual_versions_basic(self): + """Test basic actual version filtering (latest patch per minor/milestone).""" + version_filter = VersionFilter(MockGitClient()) + + # Create versions with multiple patches for same minor version + versions = create_version_tuples([ + "v8.2.2", # Latest patch for 8.2 GA + "v8.2.1", # Older patch for 8.2 GA + "v8.2.0", # Oldest patch for 8.2 GA + "v8.1.1", # Latest patch for 8.1 GA + "v8.1.0", # Older patch for 8.1 GA + ]) + + result = version_filter.filter_actual_versions(versions) + + # Should keep only latest patch for each minor version + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.2", "8.1.1"] + assert version_strings == expected_versions + + def test_filter_actual_versions_with_milestones_in_same_patch(self): + """Test actual version filtering with milestone versions.""" + version_filter = VersionFilter(MockGitClient()) + + # Create versions with both GA and milestone versions + versions = create_version_tuples([ + "v8.2.1", # GA version + "v8.2.1-m02", # Latest milestone for 8.2 + "v8.2.1-m01", # Older milestone for 8.2 + "v8.1.0", # GA version + "v8.1.0-m01", # Milestone for 8.1 + ]) + + result = version_filter.filter_actual_versions(versions) + + # Should keep latest GA and latest milestone for each minor version + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1", "8.1.0"] + assert version_strings == expected_versions + + def test_filter_actual_versions_with_milestones_in_mainline(self): + """Test actual version filtering with milestone versions.""" + version_filter = VersionFilter(MockGitClient()) + + # Create versions with both GA and milestone versions + versions = create_version_tuples([ + "v8.2.1", # GA version for 8.2 mainline + "v8.2.2-m02", # Latest milestone for 8.2.2 + "v8.2.2-m01", # Older milestone for 8.2.2 + "v8.1.0", # GA version + "v8.1.1-m01", # Milestone for 8.1 + "v8.2.0-m03", # Older milestone for 8.2.0 + ]) + + result = version_filter.filter_actual_versions(versions) + + # Should keep latest GA and latest milestone for each minor version + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.2-m02", "8.2.1", "8.1.1-m01", "8.1.0"] + assert version_strings == expected_versions + + def test_when_filter_actual_versions_with_milestones_rc_is_preferred(self): + """Test actual version filtering with milestone versions.""" + version_filter = VersionFilter(MockGitClient()) + + # Create versions with both GA and milestone versions + versions = create_version_tuples([ + "v8.2.1", # GA version for 8.2 mainline + "v8.2.2-rc01", # Latest milestone for 8.2.2 + "v8.2.2-m02", # Latest milestone for 8.2.2 + "v8.2.2-m01", # Older milestone for 8.2.2 + "v8.1.0", # GA version + ]) + + result = 
version_filter.filter_actual_versions(versions) + + # Should keep latest GA and latest milestone for each minor version + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.2-rc01", "8.2.1", "8.1.0"] + assert version_strings == expected_versions + + def test_filter_actual_versions_milestone_only(self): + """Test actual version filtering with only milestone versions.""" + version_filter = VersionFilter(MockGitClient()) + + versions = create_version_tuples([ + "v8.2.1-m02", + "v8.2.1-m01", + "v8.1.0-m01", + ]) + + result = version_filter.filter_actual_versions(versions) + + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1-m02", "8.1.0-m01"] + assert version_strings == expected_versions + + def test_filter_actual_versions_empty(self): + """Test actual version filtering with empty input.""" + version_filter = VersionFilter(MockGitClient()) + result = version_filter.filter_actual_versions([]) + assert result == [] + + def test_get_actual_major_redis_versions_success(self): + """Test the main entry point method with successful flow.""" + mock_git_client = MockGitClient() + mock_git_client.set_remote_tags([ + ("commit001", "refs/tags/v8.2.1"), + ("commit002", "refs/tags/v8.2.0"), + ("commit003", "refs/tags/v8.1.0-eol"), # Should be filtered out + ("commit004", "refs/tags/v8.0.1"), + ("commit005", "refs/tags/v8.0.0"), + ]) + + version_filter = VersionFilter(mock_git_client) + result = version_filter.get_actual_major_redis_versions(8) + + # Should apply all filters: get tags -> filter EOL -> filter actual + version_strings = [str(v[0]) for v in result] + expected_versions = ["8.2.1", "8.0.1"] # Latest patches, no EOL + assert version_strings == expected_versions + + def test_get_actual_major_redis_versions_no_versions(self): + """Test main entry point with no versions found.""" + mock_git_client = MockGitClient() + mock_git_client.set_remote_tags([]) + + version_filter = VersionFilter(mock_git_client) + result = version_filter.get_actual_major_redis_versions(8) + + assert result == [] + +class TestVersionFilterIntegration: + """Integration tests using real GitClient (mocked at subprocess level).""" + + @patch('stackbrew_generator.git_operations.subprocess.run') + def test_integration_with_real_git_client(self, mock_subprocess): + """Test VersionFilter with real GitClient (mocked subprocess).""" + # Mock git ls-remote output + mock_subprocess.return_value.stdout = ( + "commit001\trefs/tags/v8.2.1\n" + "commit002\trefs/tags/v8.2.0\n" + "commit003\trefs/tags/v8.1.0-eol\n" + ) + mock_subprocess.return_value.returncode = 0 + + git_client = GitClient() + version_filter = VersionFilter(git_client) + + result = version_filter.get_actual_major_redis_versions(8) + + # Should get filtered results + version_strings = [str(v[0]) for v in result] + commits = [v[1] for v in result] + expected_versions = ["8.2.1"] # Only 8.2.1 after all filtering + expected_commits = ["commit001"] + assert version_strings == expected_versions + assert commits == expected_commits \ No newline at end of file diff --git a/test/run-entrypoint-tests.sh b/test/run-entrypoint-tests.sh new file mode 100755 index 000000000..4516c6c9d --- /dev/null +++ b/test/run-entrypoint-tests.sh @@ -0,0 +1,702 @@ +#!/bin/bash + +## +# +# These tests are designed to verify the correctness of the entrypoint behavior +# under different preconditions and arguments. As such, in some tests, it is +# expected that the Redis process may fail with errors. 
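+#
+# The suite covers three areas: the ownership and permission fixes the
+# entrypoint applies to data and config files, the uid/gid the server or
+# sentinel process ends up running under, and the availability of the
+# bundled modules.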
+#
+# To run a specific test, use:
+#
+#   REDIS_IMG=image ./run-entrypoint-tests.sh -- specific_test_name
+#
+# For verbose output, set TEST_VERBOSE=1:
+#
+#   TEST_VERBOSE=1 REDIS_IMG=image ./run-entrypoint-tests.sh
+#
+# Uses shunit2: https://github.com/kward/shunit2
+#
+# Requires sudo
+#
+##
+
+# Container initialization wait time in seconds
+CONTAINER_INIT_WAIT=3
+
+if [ -z "$REDIS_IMG" ]; then
+    echo "REDIS_IMG must not be empty"
+    exit 1
+fi
+# By default create files owned by root to avoid colliding with the container user
+HOST_UID=0
+HOST_GID=0
+if docker info 2>/dev/null | grep -qi rootless; then
+    # For rootless docker we have to use the current user
+    HOST_UID=$(id -u)
+    HOST_GID=$(id -g)
+fi
+HOST_OWNER=$HOST_UID:$HOST_GID
+
+get_container_user_uid_gid_on_the_host() {
+    container_user="$1"
+    dir=$(mktemp -d -p .)
+    docker run --rm -v "$(pwd)/$dir":/w -w /w --entrypoint=/bin/sh "$REDIS_IMG" -c "chown $container_user ."
+    stat -c "%u %g" "$dir"
+    sudo rm -rf "$dir"
+}
+
+# Detect how the redis user and group from the container are mapped to the host ones
+read -r REDIS_UID _ <<< "$(get_container_user_uid_gid_on_the_host redis:redis)"
+
+if [ "$REDIS_UID" == "$HOST_UID" ]; then
+    echo "Cannot test ownership: the container redis uid maps to the same uid as the host owner"
+    exit 1
+fi
+
+# Helper functions #
+
+# Wait for Redis server or sentinel to be ready in a container by pinging it
+# Arguments:
+#   $1 - container name/id
+# Returns:
+#   0 if Redis responds with PONG within timeout
+#   1 if no response arrives within CONTAINER_INIT_WAIT seconds
+wait_for_redis_server_in_container() {
+    local container="$1"
+    local timeout="${CONTAINER_INIT_WAIT:-3}"
+    local elapsed=0
+    local sleep_interval=0.1
+
+    if [ -z "$container" ]; then
+        return 1
+    fi
+
+    # Compare numerically via awk: elapsed is fractional, and [[ < ]] would
+    # compare lexicographically
+    while awk "BEGIN {exit !($elapsed < $timeout)}"; do
+        # Try to ping Redis server
+        if response=$(docker exec "$container" redis-cli ping 2>/dev/null) && [ "$response" = "PONG" ]; then
+            return 0
+        fi
+
+        if response=$(docker exec "$container" redis-cli -p 26379 ping 2>/dev/null) && [ "$response" = "PONG" ]; then
+            return 0
+        fi
+
+        # Sleep and increment elapsed time
+        sleep "$sleep_interval"
+        elapsed=$(awk "BEGIN {print $elapsed + $sleep_interval}")
+    done
+
+    echo "Timeout: Redis server did not respond within ${timeout}s"
+    docker stop "$container" >/dev/null
+    return 1
+}
+
+# Creates one entry of the directory structure;
+# used in combination with iterate_dir_structure_with
create_entry() {
+    dir="$1"
+    if [ "$type" = dir ]; then
+        sudo mkdir -p "$dir/$entry"
+    elif [ "$type" = file ]; then
+        sudo touch "$dir/$entry"
+    else
+        echo "Unknown type '$type' for entry '$entry'"
+        return 1
+    fi
+    sudo chmod "$initial_mode" "$dir/$entry"
+    sudo chown "$initial_owner" "$dir/$entry"
+}
+
+# Asserts ownership and permissions for one entry from the directory structure;
+# used in combination with iterate_dir_structure_with
+assert_entry() {
+    dir="$1"
+    msg="$2"
+    actual_uid=$(sudo stat -c %u "$dir/$entry")
+    actual_mode=0$(sudo stat -c '%a' "$dir/$entry")
+    actual_mask=$(printf "0%03o" $(( actual_mode & expected_mode_mask )))
+    assertEquals "$msg: Owner for $type '$entry'" "$expected_owner" "$actual_uid"
+    assertEquals "$msg: Mode mask for $type '$entry'" "$expected_mode_mask" "$actual_mask"
+}
+
+# Iterates over the directory structure, assigning variables and executing the
+# command from the arguments for each entry.
+#
+# The directory structure has the following form:
+# entry            type  initial owner -> expected uid  initial mode -> expected mode mask
+# .                dir   $HOST_OWNER   -> $REDIS_UID    0555         -> 0700
+# appendonlydir    dir   $HOST_OWNER   -> $REDIS_UID    0333         -> 0600
+# dump.rdb         file  $HOST_OWNER   -> $REDIS_UID    0333         -> 0600
+iterate_dir_structure_with() {
+    awk 'NF {print $1,$2,$3,$5,$6,$8}' \
+        | while read -r \
+            entry \
+            type \
+            initial_owner \
+            expected_owner \
+            initial_mode \
+            expected_mode_mask; \
+        do
+            "$@"
+        done
+}
+
+# Ownership and permissions test helper.
+#
+# This function tests the entrypoint.
+#
+# The idea is to check data and config file ownership and permissions before
+# and after the container has run.
+#
+# The function creates a temporary directory and uses --dir-structure (see
+# iterate_dir_structure_with and create_entry) to create files and directories
+# in this temporary dir.
+#
+# The temporary dir is mounted into the --mount-target inside the container.
+#
+# The container is started using REDIS_IMG and the function arguments as CMD.
+#
+# After the container exits, all file permissions and ownership are checked
+# against the expected values from --dir-structure (see assert_entry).
+#
+# Additionally, the --extra-assert function is invoked if present.
+#
+# Arguments:
+#   < --mount-target DIR >
+#   [ --dir-structure STRING ]
+#   [ --extra-assert FUNCTION ]
+#   [ --docker-flags FLAGS ]
+#
+# Positional arguments:
+#   $docker_cmd
+run_docker_and_test_ownership() {
+    docker_flags=
+    extra_assert=
+    dir_structure=
+    while [[ $# -gt 0 ]]; do
+        case "$1" in
+            --dir-structure)
+                dir_structure="$2"
+                shift 2
+                ;;
+            --mount-target)
+                mount_target="$2"
+                shift 2
+                ;;
+            --docker-flags)
+                docker_flags="$2"
+                shift 2
+                ;;
+            --extra-assert)
+                extra_assert="$2"
+                shift 2
+                ;;
+            *)
+                break
+                ;;
+        esac
+    done
+    docker_cmd="$*"
+
+    if [ -z "$mount_target" ]; then
+        fail "Mount target is empty"
+        return 1
+    fi
+
+    dir=$(mktemp -d -p .)
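+
+    # Materialize the declared structure inside the temp dir, run the
+    # container against it, then re-check each entry's owner and mode mask.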
+    iterate_dir_structure_with create_entry "$dir" <<<"$dir_structure"
+
+    docker_run="docker run --rm -v "$(pwd)/$dir":$mount_target $docker_flags $REDIS_IMG $docker_cmd"
+    if [ "$TEST_VERBOSE" ]; then
+        echo -e "\n#### ownership test: $docker_cmd"
+        echo "running $docker_run"
+        echo "Before:"
+        sudo find "$dir" -exec ls -ald {} \+
+    fi
+
+    docker_output=$($docker_run 2>&1)
+
+    if [ "$TEST_VERBOSE" ]; then
+        echo "After:"
+        sudo find "$dir" -exec ls -ald {} \+
+        echo "Docker output:"
+        echo "$docker_output"
+    fi
+
+    iterate_dir_structure_with assert_entry "$dir" "$docker_cmd" <<<"$dir_structure"
+
+    if [ "$extra_assert" ]; then
+        $extra_assert
+    fi
+
+    sudo rm -rf "$dir"
+}
+
+# Run redis-server in its different invocation forms.
+# The -v option makes redis-server either print its version or fail (when a
+# config file was provided); either outcome is fine here.
+run_docker_and_test_ownership_with_common_flags_for_server() {
+    run_docker_and_test_ownership "${common_flags[@]}" "$@" -v
+    run_docker_and_test_ownership "${common_flags[@]}" redis-server "$@" -v
+    run_docker_and_test_ownership "${common_flags[@]}" /usr/local/bin/redis-server "$@" -v
+}
+
+# Run redis-sentinel in its different invocation forms with --dumb-option.
+# Sentinel is expected to fail; that is fine, since only the entrypoint
+# behavior is under test here.
+run_docker_and_test_ownership_with_common_flags_for_sentinel() {
+    run_docker_and_test_ownership "${common_flags[@]}" "$@" --sentinel --dumb-option
+    run_docker_and_test_ownership "${common_flags[@]}" redis-sentinel "$@" --dumb-option
+    run_docker_and_test_ownership "${common_flags[@]}" /usr/local/bin/redis-sentinel "$@" --dumb-option
+    run_docker_and_test_ownership "${common_flags[@]}" redis-server "$@" --sentinel --dumb-option
+    run_docker_and_test_ownership "${common_flags[@]}" /usr/local/bin/redis-server "$@" --sentinel --dumb-option
+}
+
+# Start redis server or sentinel and check the process uid and gid
+run_redis_docker_and_check_uid_gid() {
+    docker_flags=
+    expected_cmd="redis-server"
+    user=redis
+    group=redis
+    file_owner=
+
+    while [[ $# -gt 0 ]]; do
+        case "$1" in
+            --user)
+                user="$2"
+                shift 2
+                ;;
+            --group)
+                group="$2"
+                shift 2
+                ;;
+            --expected-cmd)
+                expected_cmd="$2"
+                shift 2
+                ;;
+            --docker-flags)
+                docker_flags=$2
+                shift 2
+                ;;
+            --file-owner)
+                file_owner="$2"
+                shift 2
+                ;;
+            --*)
+                fail "Unknown flag $1"
+                return 1
+                ;;
+            *)
+                break
+                ;;
+        esac
+    done
+
+    if echo "$expected_cmd" | grep -q "sentinel"; then
+        dir="$(readlink -f "$(mktemp -d -p .)")"
+        touch "$dir/sentinel.conf"
+        if [ "$file_owner" ]; then
+            sudo chown -R "$file_owner" "$dir"
+        fi
+        docker_flags="-v $dir:/etc/sentinel $docker_flags"
+    fi
+
+    docker_cmd="$*"
+    # shellcheck disable=SC2086
+    container=$(docker run $docker_flags -d "$REDIS_IMG" $docker_cmd)
+    ret=$?
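+    # The container must be created and answer PING before /proc/1 can be
+    # inspected for the effective uid/gid of the redis process.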
+ assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container" || return 1 + + cmdline=$(docker exec "$container" cat /proc/1/cmdline|tr -d \\0) + assertContains "$docker_flags $docker_cmd, cmdline: $cmdline" "$cmdline" "$expected_cmd" + + redis_user_uid=$(docker exec "$container" id -u "$user") + redis_user_gid=$(docker exec "$container" id -g "$group") + + status=$(docker exec "$container" cat /proc/1/status) + process_uid=$(echo "$status" | grep Uid | cut -f2) + process_gid=$(echo "$status" | grep Gid | cut -f2) + + assertEquals "redis cmd '$docker_cmd', process uid" "$redis_user_uid" "$process_uid" + assertEquals "redis cmd '$docker_cmd', process gid" "$redis_user_gid" "$process_gid" + + docker stop "$container" >/dev/null + if [ "$dir" ]; then + sudo rm -rf "$dir" + fi +} + +run_redis_docker_and_check_modules() { + docker_cmd="$1" + # shellcheck disable=SC2086 + container=$(docker run --rm -d "$REDIS_IMG" $docker_cmd) + ret=$? + assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container" || return 1 + + info=$(docker exec "$container" redis-cli info) + + [ "$PLATFORM" ] && [ "$PLATFORM" != "amd64" ] && startSkipping + assertContains "$info" "module:name=timeseries" + assertContains "$info" "module:name=search" + assertContains "$info" "module:name=bf" + assertContains "$info" "module:name=vectorset" + assertContains "$info" "module:name=ReJSON" + + docker stop "$container" >/dev/null +} + +# helper assert function to check redis output +assert_redis_output_has_no_config_perm_error() { + s="can't open config file" + assertNotContains "cmd: $docker_cmd, docker output contains '$s': " "$docker_output" "$s" +} + +assert_redis_v8() { + assertContains "$1" "Redis server v=8" +} + +# Tests # + +test_redis_version() { + ret=$(docker run --rm "$REDIS_IMG" -v|tail -n 1) + assert_redis_v8 "$ret" +} + +test_data_dir_owner_and_perms_changed_by_server_when_data_is_RO() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0555 -> 0700 + appendonlydir dir $HOST_OWNER -> $REDIS_UID 0333 -> 0600 + dump.rdb file $HOST_OWNER -> $REDIS_UID 0333 -> 0600 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_changed_by_server_when_appendonlydir_contains_files() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0555 -> 0700 + appendonlydir dir $HOST_OWNER -> $REDIS_UID 0333 -> 0600 + appendonlydir/foo.aof dir $HOST_OWNER -> $REDIS_UID 0333 -> 0600 + dump.rdb file $HOST_OWNER -> $REDIS_UID 0333 -> 0600 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_changed_by_server_when_data_is_empty_and_RO() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0555 -> 0700 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_not_changed_by_server_when_data_is_RW() { + dir_structure=" + . 
dir $HOST_OWNER -> $HOST_UID 0777 -> 0777 + appendonlydir dir $HOST_OWNER -> $HOST_UID 0666 -> 0666 + dump.rdb file $HOST_OWNER -> $HOST_UID 0666 -> 0666 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_not_changed_by_server_when_data_contains_unknown_file() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + appendonlydir dir $HOST_OWNER -> $HOST_UID 0444 -> 0444 + dump.rdb file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + garbage.file file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_not_changed_by_server_when_data_contains_unknown_subdir() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + somedir dir $HOST_OWNER -> $HOST_UID 0444 -> 0444 + dump.rdb file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_not_changed_when_sentinel() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + appendonlydir dir $HOST_OWNER -> $HOST_UID 0333 -> 0333 + dump.rdb file $HOST_OWNER -> $HOST_UID 0333 -> 0333 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /data) + run_docker_and_test_ownership_with_common_flags_for_sentinel +} + + +test_config_owner_not_changed_by_server_when_config_is_readable() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + redis.conf file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis) + run_docker_and_test_ownership_with_common_flags_for_server /etc/redis/redis.conf +} + +test_only_config_file_owner_and_perms_changed_by_server_when_only_config_is_not_readable() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + redis.conf file $HOST_OWNER -> $REDIS_UID 0000 -> 0400 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis) + run_docker_and_test_ownership_with_common_flags_for_server /etc/redis/redis.conf +} + +test_config_file_and_dir_owner_and_perms_changed_by_server_when_not_readable() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0000 -> 0400 + redis.conf file $HOST_OWNER -> $REDIS_UID 0000 -> 0400 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis) + run_docker_and_test_ownership_with_common_flags_for_server /etc/redis/redis.conf +} + +test_config_owner_and_perms_not_changed_when_unknown_file_exists() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0000 -> 0000 + redis.conf file $HOST_OWNER -> $HOST_UID 0000 -> 0000 + garbage.file file $HOST_OWNER -> $HOST_UID 0000 -> 0000 + " + + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis) + run_docker_and_test_ownership_with_common_flags_for_server /etc/redis/redis.conf + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/redis.conf +} + +test_config_owner_and_perms_not_changed_when_unknown_subdir_exists() { + dir_structure=" + . 
dir $HOST_OWNER -> $HOST_UID 0000 -> 0000 + redis.conf file $HOST_OWNER -> $HOST_UID 0000 -> 0000 + some dir $HOST_OWNER -> $HOST_UID 0000 -> 0000 + " + + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis) + run_docker_and_test_ownership_with_common_flags_for_server /etc/redis/redis.conf + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/redis.conf +} + +test_config_owner_and_perms_not_changed_by_sentinel_when_config_is_RW() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0777 -> 0777 + sentinel.conf file $HOST_OWNER -> $HOST_UID 0666 -> 0666 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis/sentinel) + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/sentinel/sentinel.conf +} + +test_config_file_and_dir_owner_and_perms_changed_by_sentinel_when_RO() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0555 -> 0700 + sentinel.conf file $HOST_OWNER -> $REDIS_UID 0400 -> 0600 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis/sentinel) + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/sentinel/sentinel.conf +} + +test_config_dir_owner_and_perms_changed_by_sentinel_when_only_dir_is_RO() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0555 -> 0700 + sentinel.conf file $HOST_OWNER -> $HOST_UID 0666 -> 0666 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis/sentinel) + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/sentinel/sentinel.conf +} + +test_config_owner_and_perms_changed_by_sentinel_when_config_is_WO() { + dir_structure=" + . dir $HOST_OWNER -> $REDIS_UID 0333 -> 0700 + sentinel.conf file $HOST_OWNER -> $REDIS_UID 0222 -> 0600 + " + common_flags=(--dir-structure "$dir_structure" --mount-target /etc/redis/sentinel) + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/sentinel/sentinel.conf +} + +# test that entrypoint tries to start redis even when config is non existent dir +test_redis_start_reached_when_config_dir_does_not_exist() { + assert_has_config_error() { + # shellcheck disable=SC2317 + assertContains "$docker_output" "Fatal error, can't open config file" + # shellcheck disable=SC2317 + assertContains "$docker_output" "No such file or directory" + } + common_flags=(--mount-target /etc/somewhere --extra-assert assert_has_config_error) + run_docker_and_test_ownership_with_common_flags_for_server /etc/nowhere/redis.conf +} + +test_redis_start_reached_when_chown_on_data_dir_is_denied() { + assert_internal() { + # shellcheck disable=SC2317 + assert_redis_v8 "$docker_output" + } + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0333 -> 0700 + dump.rdb file root:root -> 0 0222 -> 0222 + " + common_flags=(--mount-target /data + --dir-structure "$dir_structure" + --extra-assert assert_internal + --docker-flags "--cap-drop=chown" + ) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_data_dir_owner_and_perms_not_changed_by_server_when_data_is_RO_and_SKIP_FIX_PERMS_is_used() { + dir_structure=" + . dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + datum.rdb file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + " + common_flags=(--mount-target /data + --dir-structure "$dir_structure" + --docker-flags "-e SKIP_FIX_PERMS=1" + ) + run_docker_and_test_ownership_with_common_flags_for_server +} + +test_config_owner_and_perms_not_changed_by_sentinel_when_config_is_RO_and_SKIP_FIX_PERMS_is_used() { + dir_structure=" + . 
dir $HOST_OWNER -> $HOST_UID 0555 -> 0555 + sentinel.conf file $HOST_OWNER -> $HOST_UID 0444 -> 0444 + " + common_flags=(--mount-target /etc/redis/sentinel + --dir-structure "$dir_structure" + --docker-flags "-e SKIP_FIX_PERMS=1" + ) + run_docker_and_test_ownership_with_common_flags_for_sentinel /etc/redis/sentinel/sentinel.conf +} + + + +test_redis_server_persistence_with_bind_mount() { + dir=$(mktemp -d -p .) + + # make data directory non writable + chmod 0444 "$dir" + + container=$(docker run --rm -d -v "$(pwd)/$dir":/data "$REDIS_IMG" --appendonly yes) + ret=$? + assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container" || return 1 + + result=$(echo save | docker exec -i "$container" redis-cli) + assertEquals "OK" "$result" + + # save container hash as a value + result=$(echo "SET FOO $container" | docker exec -i "$container" redis-cli) + assertEquals "OK" "$result" + + docker stop "$container" >/dev/null + + # change the owner + sudo chown -R "$HOST_OWNER" "$dir" + + container2=$(docker run --rm -d -v "$(pwd)/$dir":/data "$REDIS_IMG") + ret=$? + assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container2" || return 1 + + value=$(echo "GET FOO" | docker exec -i "$container2" redis-cli) + assertEquals "$container" "$value" + + docker stop "$container2" >/dev/null + + sudo rm -rf "$dir" +} + +test_redis_server_persistence_with_volume() { + docker volume rm test_redis >/dev/null 2>&1 || : + + docker volume create test_redis >/dev/null + + # change owner of the data volume + docker run --rm -v test_redis:/data --entrypoint=/bin/sh "$REDIS_IMG" -c 'chown -R 0:0 /data' + + container=$(docker run --rm -d -v test_redis:/data "$REDIS_IMG" --appendonly yes) + ret=$? + assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container" || return 1 + + result=$(echo save | docker exec -i "$container" redis-cli) + assertEquals "OK" "$result" + + # save container hash as a value + result=$(echo "SET FOO $container" | docker exec -i "$container" redis-cli) + assertEquals "OK" "$result" + + docker stop "$container" >/dev/null + + # change owner and permissions of files in data volume + docker run --rm -v test_redis:/data --entrypoint=/bin/sh "$REDIS_IMG" -c 'chown -R 0:0 /data && chmod 0000 -R /data' + + container2=$(docker run --rm -d -v test_redis:/data "$REDIS_IMG") + ret=$? 
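+    # At this point the volume contents are owned by root with mode 0000;
+    # the second container can only serve the saved key if the entrypoint
+    # repaired ownership and permissions before dropping privileges, which
+    # is what the GET below verifies.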
+ assertTrue "Container '$docker_flags $REDIS_IMG $docker_cmd' created" "[ $ret -eq 0 ]" + wait_for_redis_server_in_container "$container2" || return 1 + + value=$(echo "GET FOO" | docker exec -i "$container2" redis-cli) + assertEquals "$container" "$value" + + docker stop "$container2" >/dev/null + + docker volume rm test_redis >/dev/null || : +} + +test_redis_process_uid_and_gid_are_redis() { + run_redis_docker_and_check_uid_gid "" + run_redis_docker_and_check_uid_gid redis-server + run_redis_docker_and_check_uid_gid /usr/local/bin/redis-server + + run_redis_docker_and_check_uid_gid --expected-cmd redis-sentinel redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid --expected-cmd redis-sentinel /usr/local/bin/redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid --expected-cmd "[sentinel]" /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid --expected-cmd "[sentinel]" redis-server /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid --expected-cmd "[sentinel]" /usr/local/bin/redis-server /etc/sentinel/sentinel.conf --sentinel +} + +test_redis_process_uid_and_gid_respects_docker_user_arg() { + read -r daemon_user_uid _ <<< "$(get_container_user_uid_gid_on_the_host daemon:daemon)" + + # disable persistence as directory data dir would not be writable + common_flags=(--user daemon --group daemon --docker-flags "--user daemon") + run_redis_docker_and_check_uid_gid "${common_flags[@]}" "" --save "" + run_redis_docker_and_check_uid_gid "${common_flags[@]}" redis-server --save "" + run_redis_docker_and_check_uid_gid "${common_flags[@]}" /usr/local/bin/redis-server --save "" + + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --file-owner "$daemon_user_uid" --expected-cmd redis-sentinel redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --file-owner "$daemon_user_uid" --expected-cmd redis-sentinel /usr/local/bin/redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --file-owner "$daemon_user_uid" --expected-cmd "[sentinel]" /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --file-owner "$daemon_user_uid" --expected-cmd "[sentinel]" redis-server /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --file-owner "$daemon_user_uid" --expected-cmd "[sentinel]" /usr/local/bin/redis-server /etc/sentinel/sentinel.conf --sentinel +} + +test_redis_process_uid_and_gid_are_root_when_SKIP_DROP_PRIVS_is_used() { + common_flags=(--user root --group root --docker-flags "-e SKIP_DROP_PRIVS=1") + run_redis_docker_and_check_uid_gid "${common_flags[@]}" "" --save "" + run_redis_docker_and_check_uid_gid "${common_flags[@]}" redis-server --save "" + run_redis_docker_and_check_uid_gid "${common_flags[@]}" /usr/local/bin/redis-server --save "" + + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --expected-cmd redis-sentinel redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --expected-cmd redis-sentinel /usr/local/bin/redis-sentinel /etc/sentinel/sentinel.conf + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --expected-cmd "[sentinel]" /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid "${common_flags[@]}" --expected-cmd "[sentinel]" redis-server /etc/sentinel/sentinel.conf --sentinel + run_redis_docker_and_check_uid_gid 
"${common_flags[@]}" --expected-cmd "[sentinel]" /usr/local/bin/redis-server /etc/sentinel/sentinel.conf --sentinel +} + +test_redis_server_modules_are_loaded() { + run_redis_docker_and_check_modules + run_redis_docker_and_check_modules redis-server + run_redis_docker_and_check_modules /usr/local/bin/redis-server +} + +# shellcheck disable=SC1091 +. ./shunit2 diff --git a/test/run-shell-func-tests.sh b/test/run-shell-func-tests.sh new file mode 100755 index 000000000..f772ee9e4 --- /dev/null +++ b/test/run-shell-func-tests.sh @@ -0,0 +1,73 @@ +#!/bin/bash +set -e -o pipefail +SCRIPT_DIR="$(dirname -- "$( readlink -f -- "$0"; )")" +# shellcheck disable=SC1091 +. "$SCRIPT_DIR/../.github/actions/common/func.sh" + +source_helper_file "helpers.sh" + +set -u + +init_console_output + +test_redis_version_split() { + local major minor patch suffix + local version + + version="8.2.1" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "2" "$minor" + assertEquals "patch of $version" "1" "$patch" + assertEquals "suffix of $version" "" "$suffix" + + version="v8.2.1" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "2" "$minor" + assertEquals "patch of $version" "1" "$patch" + assertEquals "suffix of $version" "" "$suffix" + + version="8.0-m01" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "0" "$minor" + assertEquals "patch of $version" "" "$patch" + assertEquals "suffix of $version" "-m01" "$suffix" + + version="v8.0-m01" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "0" "$minor" + assertEquals "patch of $version" "" "$patch" + assertEquals "suffix of $version" "-m01" "$suffix" + + version="8.0.3-m03-int" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "0" "$minor" + assertEquals "patch of $version" "3" "$patch" + assertEquals "suffix of $version" "-m03-int" "$suffix" + + version="v8.0.3-m03-int" + IFS=: read -r major minor patch suffix < <(redis_version_split "$version") + assertEquals "return code for $version" "0" "$?" + assertEquals "major of $version" "8" "$major" + assertEquals "minor of $version" "0" "$minor" + assertEquals "patch of $version" "3" "$patch" + assertEquals "suffix of $version" "-m03-int" "$suffix" +} + +test_redis_version_split_fail() { + IFS=: read -r major minor patch suffix < <(redis_version_split 8.x.x) + assertNotEquals "return code" "0" "$?" +} + + +# shellcheck disable=SC1091 +. "$SCRIPT_DIR/shunit2" \ No newline at end of file diff --git a/test/shunit2 b/test/shunit2 new file mode 100755 index 000000000..7b7c7c199 --- /dev/null +++ b/test/shunit2 @@ -0,0 +1,1612 @@ +#! /bin/sh +# vim:et:ft=sh:sts=2:sw=2 +# +# shUnit2 -- Unit testing framework for Unix shell scripts. +# +# Copyright 2008-2021 Kate Ward. All Rights Reserved. +# Released under the Apache 2.0 license. 
+# http://www.apache.org/licenses/LICENSE-2.0 +# +# Author: kate.ward@forestent.com (Kate Ward) +# https://github.com/kward/shunit2 +# +# shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is +# based on the popular JUnit unit testing framework for Java. +# +# `expr` may be antiquated, but it is the only solution in some cases. +# shellcheck disable=SC2003 +# Allow usage of legacy backticked `...` notation instead of $(...). +# shellcheck disable=SC2006 + +# Return if shunit2 already loaded. +if test -n "${SHUNIT_VERSION:-}"; then + exit 0 +fi +SHUNIT_VERSION='2.1.9pre' + +# Return values that scripts can use. +SHUNIT_TRUE=0 +SHUNIT_FALSE=1 +SHUNIT_ERROR=2 + +# Determine if `builtin` command exists. +__SHUNIT_BUILTIN='builtin' +# shellcheck disable=2039 +if ! ("${__SHUNIT_BUILTIN}" echo 123 >/dev/null 2>&1); then + __SHUNIT_BUILTIN='' +fi + +# Determine some reasonable command defaults. +__SHUNIT_CMD_ECHO_ESC='echo -e' +# shellcheck disable=SC2039,SC3037 +if ${__SHUNIT_BUILTIN} [ "`echo -e test`" = '-e test' ]; then + __SHUNIT_CMD_ECHO_ESC='echo' +fi + +# Determine if `date` supports nanosecond resolution. +__SHUNIT_CMD_DATE_SECONDS='date +%s.%N' +if ${__SHUNIT_BUILTIN} [ "`date +%N`" = '%N' ]; then + __SHUNIT_CMD_DATE_SECONDS='date +%s' +fi + +# Determine `bc` command. +__SHUNIT_CMD_BC='bc' +if ! (${__SHUNIT_CMD_BC} --help >/dev/null 2>&1); then + __SHUNIT_CMD_BC='busybox bc' +fi +if ! (${__SHUNIT_CMD_BC} --help >/dev/null 2>&1); then + __SHUNIT_CMD_BC='' +fi + +# Determine `dc` command. +__SHUNIT_CMD_DC='dc' +if ! (${__SHUNIT_CMD_DC} --help >/dev/null 2>&1); then + __SHUNIT_CMD_DC='busybox dc' +fi +if ! (${__SHUNIT_CMD_DC} --help >/dev/null 2>&1); then + __SHUNIT_CMD_DC='' +fi + +# Format float numbers to the single style from different tools. +# Args: +# num: string: float number to format +# Returns: +# string: formatted number. Empty string if error occurs. +_shunit_float_format() { + # Double-dot number is an error. + # No need to format if the number is an integer. + case "${1}" in + *.*.*) + return + ;; + *.*) + ;; + *) + echo "${1}" + return + ;; + esac + + _shunit_format_result_="$1" + + # Add leading zero if needed. + _shunit_format_result_="$(echo "${_shunit_format_result_}" \ + |command sed 's/^\./0./g')" + + # Remove trailing zeros. + _shunit_format_result_="$(echo "${_shunit_format_result_}" \ + |command sed 's/0\+$//g')" + + # Remove trailing dot. + _shunit_format_result_="$(echo "${_shunit_format_result_}" \ + |command sed 's/\.$//g')" + + # Print the result. + echo "${_shunit_format_result_}" + unset _shunit_format_result_ +} + +# Calculate numbers using bc. +# Args: +# left: string: left operand (may be float point) +# operation: string: operation (+ - * /) +# right: string: right operand (may be float point) +# Returns: +# string: result +_shunit_calc_bc() { + _shunit_output_="$(echo "$@" \ + |command ${__SHUNIT_CMD_BC:?})" + shunit_return=$? + if ${__SHUNIT_BUILTIN} [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then + _shunit_float_format "${_shunit_output_}" + shunit_return=$? + fi + + unset _shunit_output_ + return ${shunit_return} +} + +# Calculate numbers using dc. +# Args: +# left: string: left operand (may be float point) +# operation: string: operation (+ - * /) +# right: string: right operand (may be float point) +# Returns: +# string: result +_shunit_calc_dc() { + _shunit_output_="$(echo "$1" "$3" "$2" "p" \ + |command ${__SHUNIT_CMD_DC:?})" + shunit_return=$? 
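+  # Note: `dc` reads RPN, hence the "$1 $3 $2 p" ordering above: push the
+  # left operand, push the right operand, apply the operator, then print.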
+ if ${__SHUNIT_BUILTIN} [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then + _shunit_float_format "${_shunit_output_}" + shunit_return=$? + fi + + unset _shunit_output_ + return ${shunit_return} +} + +# Calculate numbers using expr. +# Args: +# left: string: left integer number operand +# operation: string: operation (+ - * /) +# right: string: right integer number operand +# Returns: +# string: result. Empty string if error occurs. +_shunit_calc_expr() { + expr "$@" 2>/dev/null || ${__SHUNIT_BUILTIN} true +} + +# Determine what command to use for calculating numbers. +__SHUNIT_CMD_CALC='_shunit_calc_bc' +if ! ("${__SHUNIT_CMD_CALC}" 1 + 2 >/dev/null 2>&1); then + __SHUNIT_CMD_CALC=_shunit_calc_dc +fi +if ! ("${__SHUNIT_CMD_CALC}" 1 + 2 >/dev/null 2>&1); then + __SHUNIT_CMD_CALC=_shunit_calc_expr +fi + +# Commands a user can override if needed. +__SHUNIT_CMD_TPUT='tput' +SHUNIT_CMD_TPUT=${SHUNIT_CMD_TPUT:-${__SHUNIT_CMD_TPUT}} + +# Enable color output. Options are 'auto', 'always', or 'never'. +SHUNIT_COLOR=${SHUNIT_COLOR:-auto} + +# +# Internal constants. +# + +__SHUNIT_MODE_SOURCED='sourced' +__SHUNIT_MODE_STANDALONE='standalone' +__SHUNIT_PARENT=${SHUNIT_PARENT:-$0} + +# User provided test prefix to display in front of the name of the test being +# executed. Define by setting the SHUNIT_TEST_PREFIX variable. +__SHUNIT_TEST_PREFIX=${SHUNIT_TEST_PREFIX:-} + +# ANSI colors. +__SHUNIT_ANSI_NONE='\033[0m' +__SHUNIT_ANSI_RED='\033[1;31m' +__SHUNIT_ANSI_GREEN='\033[1;32m' +__SHUNIT_ANSI_YELLOW='\033[1;33m' +__SHUNIT_ANSI_CYAN='\033[1;36m' + +# +# Internal variables. +# + +# Variables. +__shunit_lineno='' # Line number of executed test. +__shunit_mode=${__SHUNIT_MODE_SOURCED} # Operating mode. +__shunit_reportGenerated=${SHUNIT_FALSE} # Is report generated. +__shunit_script='' # Filename of unittest script (standalone mode). +__shunit_skip=${SHUNIT_FALSE} # Is skipping enabled. +__shunit_suite='' # Suite of tests to execute. +__shunit_clean=${SHUNIT_FALSE} # _shunit_cleanup() was already called. +__shunit_suiteName='' # Text name of current test suite. +__shunit_xmlSuiteName='' # XML-ready text name of current test suite. + +# JUnit XML variables. +__shunit_junitXmlOutputFile='' # File to use for JUnit XML output in addition to stdout. +__shunit_junitXmlTestCases='' # Test cases info in the JUnit XML format for output +__shunit_junitXmlCurrentTestCaseErrors='' # Current test case error info in the JUnit XML format for output + +# Time variables +__shunit_startSuiteTime='' # When the suite execution was started +__shunit_endSuiteTime='' # When the suite execution ended +__shunit_startCaseTime='' # When the case execution was started +__shunit_endCaseTime='' # When the case execution ended + +# ANSI colors (populated by _shunit_configureColor()). +__shunit_ansi_none='' +__shunit_ansi_red='' +__shunit_ansi_green='' +__shunit_ansi_yellow='' +__shunit_ansi_cyan='' + +# Counts of tests. +__shunit_testSuccess=${SHUNIT_TRUE} +__shunit_testsTotal=0 +__shunit_testsPassed=0 +__shunit_testsFailed=0 + +# Counts of asserts. +__shunit_assertsTotal=0 +__shunit_assertsPassed=0 +__shunit_assertsFailed=0 +__shunit_assertsSkipped=0 +__shunit_assertsCurrentTest=0 + +# +# Internal functions. +# + +# Logging. 
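+# All three helpers write to stderr; _shunit_fatal() additionally exits the
+# script with SHUNIT_ERROR.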
+_shunit_warn() { + ${__SHUNIT_CMD_ECHO_ESC} "${__shunit_ansi_yellow}shunit2:WARN${__shunit_ansi_none} $*" >&2 +} +_shunit_error() { + ${__SHUNIT_CMD_ECHO_ESC} "${__shunit_ansi_red}shunit2:ERROR${__shunit_ansi_none} $*" >&2 +} +_shunit_fatal() { + ${__SHUNIT_CMD_ECHO_ESC} "${__shunit_ansi_red}shunit2:FATAL${__shunit_ansi_none} $*" >&2 + exit ${SHUNIT_ERROR} +} + +# +# Macros. +# + +# shellcheck disable=SC2016,SC2089 +_SHUNIT_LINENO_='eval __shunit_lineno=""; if ${__SHUNIT_BUILTIN} [ "${1:-}" = "--lineno" ] && ${__SHUNIT_BUILTIN} [ -n "${2:-}" ]; then __shunit_lineno="[${2}]"; shift 2; fi;' + +# +# Setup. +# + +# Specific shell checks. +if ${__SHUNIT_BUILTIN} [ -n "${ZSH_VERSION:-}" ]; then + setopt |grep "^shwordsplit$" >/dev/null + if ${__SHUNIT_BUILTIN} [ $? -ne ${SHUNIT_TRUE} ]; then + _shunit_fatal 'zsh shwordsplit option is required for proper operation' + fi + if ${__SHUNIT_BUILTIN} [ -z "${SHUNIT_PARENT:-}" ]; then + _shunit_fatal "zsh does not pass \$0 through properly. please declare \ +\"SHUNIT_PARENT=\$0\" before calling shUnit2" + fi +fi + +# Set the constants readonly. +__shunit_constants=`set |grep '^__SHUNIT_' |cut -d= -f1` +echo "${__shunit_constants}" |grep '^Binary file' >/dev/null && \ + __shunit_constants=`set |grep -a '^__SHUNIT_' |cut -d= -f1` +for __shunit_const in ${__shunit_constants}; do + if ${__SHUNIT_BUILTIN} [ -z "${ZSH_VERSION:-}" ]; then + readonly "${__shunit_const}" + else + case ${ZSH_VERSION} in + [123].*) readonly "${__shunit_const}" ;; + *) + # Declare readonly constants globally. + # shellcheck disable=SC2039,SC3045 + readonly -g "${__shunit_const}" + esac + fi +done +unset __shunit_const __shunit_constants + +#----------------------------------------------------------------------------- +# Assertion functions. +# + +# Assert that two values are equal to one another. +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertEquals() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_return=${SHUNIT_TRUE} + if ${__SHUNIT_BUILTIN} [ "${shunit_expected_}" = "${shunit_actual_}" ]; then + _shunit_assertPass + else + failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"' + +# Assert that two values are not equal to one another. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotEquals() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_return=${SHUNIT_TRUE} + if ${__SHUNIT_BUILTIN} [ "${shunit_expected_}" != "${shunit_actual_}" ]; then + _shunit_assertPass + else + failSame "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"' + +# Assert that a container contains a content. +# +# Args: +# message: string: failure message [optional] +# container: string: container to analyze +# content: string: content to find +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertContains() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertContains() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_container_=$1 + shunit_content_=$2 + shunit_return=${SHUNIT_TRUE} + if echo "${shunit_container_}" |grep -F -- "${shunit_content_}" >/dev/null; then + _shunit_assertPass + else + failNotFound "${shunit_message_}" "${shunit_content_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_container_ shunit_content_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_CONTAINS_='eval assertContains --lineno "${LINENO:-}"' + +# Assert that a container does not contain a content. +# +# Args: +# message: string: failure message [optional] +# container: string: container to analyze +# content: string: content to look for +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotContains() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotContains() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_container_=$1 + shunit_content_=$2 + + shunit_return=${SHUNIT_TRUE} + if echo "$shunit_container_" |grep -F -- "$shunit_content_" > /dev/null; then + failFound "${shunit_message_}" "${shunit_content_}" + shunit_return=${SHUNIT_FALSE} + else + _shunit_assertPass + fi + + unset shunit_message_ shunit_container_ shunit_content_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_CONTAINS_='eval assertNotContains --lineno "${LINENO:-}"' + +# Assert that a value is null (i.e. an empty string). 
+# +# Args: +# message: string: failure message [optional] +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNull() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -gt 2 ]; then + # Allowing 0 arguments as $1 might actually be null. + _shunit_error "assertNull() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + ${__SHUNIT_BUILTIN} test -z "${1:-}" + assertTrue "${shunit_message_}" $? + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"' + +# Assert that a value is not null (i.e. a non-empty string). +# +# Args: +# message: string: failure message [optional] +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotNull() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -gt 2 ]; then + # Allowing 0 arguments as $1 might actually be null. + _shunit_error "assertNotNull() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + ${__SHUNIT_BUILTIN} test -n "${1:-}" + assertTrue "${shunit_message_}" $? + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"' + +# Assert that two values are the same (i.e. equal to one another). +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertSame() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + assertEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"' + +# Assert that two values are not the same (i.e. not equal to one another). +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotSame() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_:-}$1" + shift + fi + assertNotEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? 
+ + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"' + +# Assert that a value or shell test condition is true. +# +# In shell, a value of 0 is true and a non-zero value is false. Any integer +# value passed can thereby be tested. +# +# Shell supports much more complicated tests though, and a means to support +# them was needed. As such, this function tests that conditions are true or +# false through evaluation rather than just looking for a true or false. +# +# The following test will succeed: +# assertTrue 0 +# assertTrue "[ 34 -gt 23 ]" +# The following test will fail with a message: +# assertTrue 123 +# assertTrue "test failed" "[ -r '/non/existent/file' ]" +# +# Args: +# message: string: failure message [optional] +# condition: string: integer value or shell conditional statement +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertTrue() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "assertTrue() takes one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_condition_=$1 + + # See if condition is an integer, i.e. a return value. + shunit_return=${SHUNIT_TRUE} + if ${__SHUNIT_BUILTIN} [ -z "${shunit_condition_}" ]; then + # Null condition. + shunit_return=${SHUNIT_FALSE} + elif (expr \( "${shunit_condition_}" + '0' \) '=' "${shunit_condition_}" >/dev/null 2>&1) + then + # Possible return value. Treating 0 as true, and non-zero as false. + if ${__SHUNIT_BUILTIN} [ "${shunit_condition_}" -ne 0 ]; then + shunit_return=${SHUNIT_FALSE} + fi + else + # Hopefully... a condition. + if ! eval "${shunit_condition_}" >/dev/null 2>&1; then + shunit_return=${SHUNIT_FALSE} + fi + fi + + # Record the test. + if ${__SHUNIT_BUILTIN} [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then + _shunit_assertPass + else + _shunit_assertFail "${shunit_message_}" + fi + + unset shunit_message_ shunit_condition_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"' + +# Assert that a value or shell test condition is false. +# +# In shell, a value of 0 is true and a non-zero value is false. Any integer +# value passed can thereby be tested. +# +# Shell supports much more complicated tests though, and a means to support +# them was needed. As such, this function tests that conditions are true or +# false through evaluation rather than just looking for a true or false. 
+# +# The following test will succeed: +# assertFalse 1 +# assertFalse "[ 'apples' = 'oranges' ]" +# The following test will fail with a message: +# assertFalse 0 +# assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]" +# +# Args: +# message: string: failure message [optional] +# condition: string: integer value or shell conditional statement +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertFalse() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "assertFalse() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_condition_=$1 + + # See if condition is an integer, i.e. a return value. + shunit_return=${SHUNIT_TRUE} + if ${__SHUNIT_BUILTIN} [ -z "${shunit_condition_}" ]; then + # Null condition. + shunit_return=${SHUNIT_TRUE} + elif (expr \( "${shunit_condition_}" + '0' \) '=' "${shunit_condition_}" >/dev/null 2>&1); then + # Possible return value. Treating 0 as true, and non-zero as false. + if ${__SHUNIT_BUILTIN} [ "${shunit_condition_}" -eq 0 ]; then + shunit_return=${SHUNIT_FALSE} + fi + else + # Hopefully... a condition. + # shellcheck disable=SC2086 + if eval ${shunit_condition_} >/dev/null 2>&1; then + shunit_return=${SHUNIT_FALSE} + fi + fi + + # Record the test. + if ${__SHUNIT_BUILTIN} [ "${shunit_return}" -eq "${SHUNIT_TRUE}" ]; then + _shunit_assertPass + else + _shunit_assertFail "${shunit_message_}" + fi + + unset shunit_message_ shunit_condition_ + return "${shunit_return}" +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"' + +#----------------------------------------------------------------------------- +# Failure functions. +# + +# Records a test failure. +# +# Args: +# message: string: failure message [optional] +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +fail() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -gt 1 ]; then + _shunit_error "fail() requires zero or one arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 1 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + _shunit_assertFail "${shunit_message_}" + + unset shunit_message_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_='eval fail --lineno "${LINENO:-}"' + +# Records a test failure, stating two values were not equal. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failNotEquals() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>" + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"' + +# Records a test failure, stating a value was found. +# +# Args: +# message: string: failure message [optional] +# content: string: found value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failFound() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "failFound() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_content_=$1 + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }found:<${shunit_content_}>" + + unset shunit_message_ shunit_content_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_FOUND_='eval failFound --lineno "${LINENO:-}"' + +# Records a test failure, stating a content was not found. +# +# Args: +# message: string: failure message [optional] +# content: string: content not found +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotFound() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "failNotFound() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_content_=$1 + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }not found:<${shunit_content_}>" + + unset shunit_message_ shunit_content_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_FOUND_='eval failNotFound --lineno "${LINENO:-}"' + +# Records a test failure, stating two values should have been the same. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failSame() requires two or three arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same" + + unset shunit_message_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_SAME_='eval failSame --lineno "${LINENO:-}"' + +# Records a test failure, stating two values were not equal. +# +# This is functionally equivalent to calling failNotEquals(). +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if ${__SHUNIT_BUILTIN} [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failNotSame() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + if _shunit_shouldSkip; then + return ${SHUNIT_TRUE} + fi + + shunit_message_=${__shunit_lineno} + if ${__SHUNIT_BUILTIN} [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + failNotEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"' + +#----------------------------------------------------------------------------- +# Skipping functions. +# + +# Force remaining assert and fail functions to be "skipped". +# +# This function forces the remaining assert and fail functions to be "skipped", +# i.e. they will have no effect. Each function skipped will be recorded so that +# the total of asserts and fails will not be altered. +# +# Args: +# message: string: message to provide to user [optional] +startSkipping() { + if ${__SHUNIT_BUILTIN} [ $# -gt 0 ]; then _shunit_warn "[skipping] $*"; fi + __shunit_skip=${SHUNIT_TRUE} +} + +# Resume the normal recording behavior of assert and fail calls. +# +# Args: +# None +endSkipping() { __shunit_skip=${SHUNIT_FALSE}; } + +# Returns the state of assert and fail call skipping. +# +# Args: +# None +# Returns: +# boolean: (TRUE/FALSE constant) +isSkipping() { return ${__shunit_skip}; } + +#----------------------------------------------------------------------------- +# Suite functions. +# + +# Stub. This function should contains all unit test calls to be made. +# +# DEPRECATED (as of 2.1.0) +# +# This function can be optionally overridden by the user in their test suite. +# +# If this function exists, it will be called when shunit2 is sourced. If it +# does not exist, shunit2 will search the parent script for all functions +# beginning with the word 'test', and they will be added dynamically to the +# test suite. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +# Adds a function name to the list of tests schedule for execution. 
+#
+# This function should only be called from within the suite() function.
+#
+# Args:
+#   function: string: name of a function to add to current unit test suite
+suite_addTest() {
+  shunit_func_=${1:-}
+
+  __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}"
+  __shunit_testsTotal=`expr "${__shunit_testsTotal}" + 1`
+
+  unset shunit_func_
+}
+
+# Stub. This function will be called once before any tests are run.
+#
+# Common one-time environment preparation tasks shared by all tests can be
+# defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+#   None
+#oneTimeSetUp() { :; }  # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called once after all tests are finished.
+#
+# Common one-time environment cleanup tasks shared by all tests can be defined
+# here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+#   None
+#oneTimeTearDown() { :; }  # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called before each test is run.
+#
+# Common environment preparation tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+#   None
+#setUp() { :; }  # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called after each test is run.
+#
+# Common environment cleanup tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+#   None
+#tearDown() { :; }  # DO NOT UNCOMMENT THIS FUNCTION
+
+#------------------------------------------------------------------------------
+# Internal shUnit2 functions.
+#
+
+# Create a temporary directory to store various run-time files in.
+#
+# This function is a cross-platform temporary directory creation tool. Not all
+# OSes have the `mktemp` function, so one is included here.
+#
+# Args:
+#   None
+# Outputs:
+#   string: the temporary directory that was created
+_shunit_mktempDir() {
+  # Try the standard `mktemp` function.
+  if ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ); then
+    return
+  fi
+
+  # The standard `mktemp` didn't work. Use our own.
+  # shellcheck disable=SC2039,SC3028
+  if ${__SHUNIT_BUILTIN} [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then
+    _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 </dev/urandom \
+        |command sed 's/^[^0-9a-f]*//'`
+  elif ${__SHUNIT_BUILTIN} [ -n "${RANDOM:-}" ]; then
+    # `$RANDOM` works.
+    _shunit_random_=${RANDOM}${RANDOM}${RANDOM}$$
+  else
+    # `$RANDOM` doesn't work.
+    _shunit_date_=`date '+%Y%m%d%H%M%S'`
+    _shunit_random_=`expr "${_shunit_date_}" / $$`
+  fi
+
+  _shunit_tmpDir_="${TMPDIR:-/tmp}/shunit.${_shunit_random_}"
+  ( umask 077 && command mkdir "${_shunit_tmpDir_}" ) || \
+      _shunit_fatal 'could not create temporary directory! exiting'
+
+  echo "${_shunit_tmpDir_}"
+  unset _shunit_date_ _shunit_random_ _shunit_tmpDir_
+}
+
+# Create phantom functions to work around issues with Cygwin (see the call
+# near the end of this file). An executable stub script is written into the
+# temporary directory for each overridable function, so that test scripts
+# need not define them all.
+#
+# Args:
+#   None
+_shunit_mktempFunc() {
+  for _shunit_func_ in oneTimeSetUp oneTimeTearDown setUp tearDown suite noexec; do
+    _shunit_file_="${__shunit_tmpDir}/${_shunit_func_}"
+    command cat <<EOF >"${_shunit_file_}"
+#! /bin/sh
+exit ${SHUNIT_TRUE}
+EOF
+    command chmod +x "${_shunit_file_}"
+  done
+
+  unset _shunit_file_
+}
+
+# Final cleanup function to leave things as we found them.
+#
+# Besides removing the temporary directory, this function is in charge of the
+# final exit code of the unit test. The exit code is based on how the script
+# was ended (e.g. normal exit, or via Ctrl-C).
+# +# Args: +# name: string: name of the trap called (specified when trap defined) +_shunit_cleanup() { + _shunit_name_=$1 + + _shunit_signal_=0 + case "${_shunit_name_}" in + EXIT) ;; + INT) _shunit_signal_=130 ;; # 2+128 + TERM) _shunit_signal_=143 ;; # 15+128 + *) + _shunit_error "unrecognized trap value (${_shunit_name_})" + ;; + esac + if ${__SHUNIT_BUILTIN} [ "${_shunit_name_}" != 'EXIT' ]; then + _shunit_warn "trapped and now handling the (${_shunit_name_}) signal" + fi + + # Do our work. + if ${__SHUNIT_BUILTIN} [ ${__shunit_clean} -eq ${SHUNIT_FALSE} ]; then + # Ensure tear downs are only called once. + __shunit_clean=${SHUNIT_TRUE} + + tearDown || _shunit_warn 'tearDown() returned non-zero return code.' + __shunit_endCaseTime=`${__SHUNIT_CMD_DATE_SECONDS}` + oneTimeTearDown || \ + _shunit_warn 'oneTimeTearDown() returned non-zero return code.' + __shunit_endSuiteTime=`${__SHUNIT_CMD_DATE_SECONDS}` + + command rm -fr "${__shunit_tmpDir}" + fi + + if ${__SHUNIT_BUILTIN} [ "${_shunit_name_}" != 'EXIT' ]; then + # Handle all non-EXIT signals. + trap - 0 # Disable EXIT trap. + exit ${_shunit_signal_} + elif ${__SHUNIT_BUILTIN} [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ]; then + _shunit_assertFail 'unknown failure encountered running a test' + _shunit_generateReport + exit ${SHUNIT_ERROR} + fi + + unset _shunit_name_ _shunit_signal_ +} + +# configureColor based on user color preference. +# +# Args: +# color: string: color mode (one of `always`, `auto`, or `never`). +_shunit_configureColor() { + _shunit_color_=${SHUNIT_FALSE} # By default, no color. + case $1 in + 'always') _shunit_color_=${SHUNIT_TRUE} ;; + 'auto') + if ${__SHUNIT_BUILTIN} [ "`_shunit_colors`" -ge 8 ]; then + _shunit_color_=${SHUNIT_TRUE} + fi + ;; + 'never'|'none') ;; # Support 'none' to support legacy usage. + *) _shunit_fatal "unrecognized color option '$1'" ;; + esac + + # shellcheck disable=SC2254 + case ${_shunit_color_} in + ${SHUNIT_TRUE}) + __shunit_ansi_none=${__SHUNIT_ANSI_NONE} + __shunit_ansi_red=${__SHUNIT_ANSI_RED} + __shunit_ansi_green=${__SHUNIT_ANSI_GREEN} + __shunit_ansi_yellow=${__SHUNIT_ANSI_YELLOW} + __shunit_ansi_cyan=${__SHUNIT_ANSI_CYAN} + ;; + ${SHUNIT_FALSE}) + __shunit_ansi_none='' + __shunit_ansi_red='' + __shunit_ansi_green='' + __shunit_ansi_yellow='' + __shunit_ansi_cyan='' + ;; + esac + + unset _shunit_color_ _shunit_tput_ +} + +# colors returns the number of supported colors for the TERM. +_shunit_colors() { + if _shunit_tput_=`${SHUNIT_CMD_TPUT} colors 2>/dev/null`; then + echo "${_shunit_tput_}" + else + echo 16 + fi + unset _shunit_tput_ +} + +# The actual running of the tests happens here. +# +# Args: +# None +_shunit_execSuite() { + for _shunit_test_ in ${__shunit_suite}; do + __shunit_testSuccess=${SHUNIT_TRUE} + + # Reset per-test info + __shunit_assertsCurrentTest=0 + __shunit_junitXmlCurrentTestCaseErrors='' + + # Disable skipping. + endSkipping + + __shunit_startCaseTime=`${__SHUNIT_CMD_DATE_SECONDS}` + + # Execute the per-test setUp() function. + if ! setUp; then + _shunit_fatal "setUp() returned non-zero return code." + fi + + # Execute the test. + echo "${__SHUNIT_TEST_PREFIX}${_shunit_test_}" + # shellcheck disable=SC2086 + if ! eval ${_shunit_test_}; then + _shunit_error "${_shunit_test_}() returned non-zero return code." + __shunit_testSuccess=${SHUNIT_ERROR} + fi + + # Execute the per-test tearDown() function. + if ! tearDown; then + _shunit_fatal "tearDown() returned non-zero return code." 
+    fi
+    __shunit_endCaseTime=`${__SHUNIT_CMD_DATE_SECONDS}`
+
+    _shunit_test_execution_time_=`"${__SHUNIT_CMD_CALC}" "${__shunit_endCaseTime}" - "${__shunit_startCaseTime}"`
+
+    # Store current test case info in JUnit XML.
+    __shunit_junitXmlTestCases="${__shunit_junitXmlTestCases}
+    <testcase classname=\"${__shunit_xmlSuiteName}\" name=\"${_shunit_test_}\" time=\"${_shunit_test_execution_time_}\">${__shunit_junitXmlCurrentTestCaseErrors}
+    </testcase>"
+
+    # Update stats.
+    if ${__SHUNIT_BUILTIN} [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then
+      __shunit_testsPassed=`expr "${__shunit_testsPassed}" + 1`
+    else
+      __shunit_testsFailed=`expr "${__shunit_testsFailed}" + 1`
+    fi
+  done
+
+  unset _shunit_test_ _shunit_test_execution_time_
+}
+
+# Generates the user friendly report with appropriate OK/FAILED message.
+#
+# Args:
+#   None
+# Output:
+#   string: the report of successful and failed tests, as well as totals.
+_shunit_generateReport() {
+  if ${__SHUNIT_BUILTIN} [ "${__shunit_reportGenerated}" -eq ${SHUNIT_TRUE} ]; then
+    return
+  fi
+
+  _shunit_ok_=${SHUNIT_TRUE}
+
+  # If no exit code was provided, determine an appropriate one.
+  if ${__SHUNIT_BUILTIN} [ "${__shunit_testsFailed}" -gt 0 -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ]; then
+    _shunit_ok_=${SHUNIT_FALSE}
+  fi
+
+  echo
+  _shunit_msg_="Ran ${__shunit_ansi_cyan}${__shunit_testsTotal}${__shunit_ansi_none}"
+  if ${__SHUNIT_BUILTIN} [ "${__shunit_testsTotal}" -eq 1 ]; then
+    ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} test."
+  else
+    ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} tests."
+  fi
+
+  if ${__SHUNIT_BUILTIN} [ -n "${__shunit_junitXmlOutputFile}" ]; then
+    # Calculate total execution time in seconds.
+    _shunit_suite_execution_time_=`"${__SHUNIT_CMD_CALC}" "${__shunit_endSuiteTime}" - "${__shunit_startSuiteTime}"`
+
+    # Generate an ISO-8601 compliant date.
+    _shunit_suite_start_time_preformatted_=`date -u '+%Y-%m-%dT%H:%M:%S%z' -d "@${__shunit_startSuiteTime}"`
+
+    echo "<testsuite name=\"${__shunit_xmlSuiteName}\" tests=\"${__shunit_testsTotal}\" failures=\"${__shunit_testsFailed}\" time=\"${_shunit_suite_execution_time_}\" timestamp=\"${_shunit_suite_start_time_preformatted_}\">
+${__shunit_junitXmlTestCases}
+</testsuite>" > "${__shunit_junitXmlOutputFile}"
+    echo
+    echo "JUnit XML file ${__shunit_junitXmlOutputFile} was saved."
+  fi
+
+  if ${__SHUNIT_BUILTIN} [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then
+    _shunit_msg_="${__shunit_ansi_green}OK${__shunit_ansi_none}"
+    if ${__SHUNIT_BUILTIN} [ "${__shunit_assertsSkipped}" -gt 0 ]; then
+      _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none})"
+    fi
+  else
+    _shunit_msg_="${__shunit_ansi_red}FAILED${__shunit_ansi_none}"
+    _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_red}failures=${__shunit_assertsFailed}${__shunit_ansi_none}"
+    if ${__SHUNIT_BUILTIN} [ "${__shunit_assertsSkipped}" -gt 0 ]; then
+      _shunit_msg_="${_shunit_msg_},${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none}"
+    fi
+    _shunit_msg_="${_shunit_msg_})"
+  fi
+
+  echo
+  ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_}"
+  __shunit_reportGenerated=${SHUNIT_TRUE}
+
+  unset _shunit_msg_ _shunit_ok_ _shunit_suite_execution_time_ _shunit_suite_start_time_preformatted_
+}
+
+# Test for whether a function should be skipped.
+#
+# Args:
+#   None
+# Returns:
+#   boolean: whether the test should be skipped (TRUE/FALSE constant)
+_shunit_shouldSkip() {
+  if ${__SHUNIT_BUILTIN} test ${__shunit_skip} -eq ${SHUNIT_FALSE}; then
+    return ${SHUNIT_FALSE}
+  fi
+  _shunit_assertSkip
+}
+
+# Records a successful test.
+#
+# Args:
+#   None
+_shunit_assertPass() {
+  __shunit_assertsPassed=`expr "${__shunit_assertsPassed}" + 1`
+  __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1`
+  __shunit_assertsCurrentTest=`expr "${__shunit_assertsCurrentTest}" + 1`
+}
+
+# Records a test failure.
+#
+# Args:
+#   message: string: failure message to provide user
+_shunit_assertFail() {
+  __shunit_testSuccess=${SHUNIT_FALSE}
+  _shunit_incFailedCount
+
+  _shunit_xml_message_="`_shunit_escapeXmlData "$@"`"
+
+  __shunit_junitXmlCurrentTestCaseErrors="${__shunit_junitXmlCurrentTestCaseErrors}
+      <failure message=\"${_shunit_xml_message_}\"/>"
+
+  if ${__SHUNIT_BUILTIN} [ $# -gt 0 ]; then
+    ${__SHUNIT_CMD_ECHO_ESC} "${__shunit_ansi_red}ASSERT:${__shunit_ansi_none}$*"
+  fi
+
+  unset _shunit_xml_message_
+}
+
+# Increment the count of failed asserts.
+#
+# Args:
+#   none
+_shunit_incFailedCount() {
+  __shunit_assertsFailed=`expr "${__shunit_assertsFailed}" + 1`
+  __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1`
+  __shunit_assertsCurrentTest=`expr "${__shunit_assertsCurrentTest}" + 1`
+}
+
+# Records a skipped test.
+#
+# Args:
+#   None
+_shunit_assertSkip() {
+  __shunit_assertsSkipped=`expr "${__shunit_assertsSkipped}" + 1`
+  __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1`
+  __shunit_assertsCurrentTest=`expr "${__shunit_assertsCurrentTest}" + 1`
+}
+
+# Dump the current test metrics.
+#
+# Args:
+#   none
+_shunit_metrics() {
+  echo "< \
+total: ${__shunit_assertsTotal} \
+passed: ${__shunit_assertsPassed} \
+failed: ${__shunit_assertsFailed} \
+skipped: ${__shunit_assertsSkipped} \
+>"
+}
+
+# Prepare a script filename for sourcing.
+#
+# Args:
+#   script: string: path to a script to source
+# Returns:
+#   string: filename prefixed with ./ (if necessary)
+_shunit_prepForSourcing() {
+  _shunit_script_=$1
+  case "${_shunit_script_}" in
+    /*|./*) echo "${_shunit_script_}" ;;
+    *) echo "./${_shunit_script_}" ;;
+  esac
+  unset _shunit_script_
+}
+
+# Extract list of functions to run tests against.
+#
+# Args:
+#   script: string: name of script to extract functions from
+# Returns:
+#   string: of function names
+_shunit_extractTestFunctions() {
+  _shunit_script_=$1
+
+  # Extract the lines with test function names, strip of anything besides the
+  # function name, and output everything on a single line.
+  _shunit_regex_='^\s*((function test[A-Za-z0-9_-]*)|(test[A-Za-z0-9_-]* *\(\)))'
+  grep -E "${_shunit_regex_}" "${_shunit_script_}" \
+    |command sed 's/^[^A-Za-z0-9_-]*//;s/^function //;s/\([A-Za-z0-9_-]*\).*/\1/g' \
+    |xargs
+
+  unset _shunit_regex_ _shunit_script_
+}
+
+# Escape XML data.
+#
+# Args:
+#   data: string: data to escape
+# Returns:
+#   string: escaped data
+_shunit_escapeXmlData() {
+  # Required XML characters to escape are described here:
+  # http://www.w3.org/TR/REC-xml/#syntax
+  # https://www.liquid-technologies.com/Reference/Glossary/XML_EscapingData.html
+  echo "$*" \
+    |command sed 's/&/\&amp;/g;s/</\&lt;/g;s/>/\&gt;/g;s/"/\&quot;/g'";s/'/\&apos;/g"
+}
+
+#------------------------------------------------------------------------------
+# Main.
+#
+
+# Determine the operating mode.
+if ${__SHUNIT_BUILTIN} [ $# -eq 0 -o "${1:-}" = '--' ]; then
+  __shunit_script=${__SHUNIT_PARENT}
+  __shunit_mode=${__SHUNIT_MODE_SOURCED}
+else
+  __shunit_script=$1
+  if ! ${__SHUNIT_BUILTIN} [ -r "${__shunit_script}" ]; then
+    _shunit_fatal "unable to read from ${__shunit_script}"
+  fi
+  __shunit_mode=${__SHUNIT_MODE_STANDALONE}
+fi
+
+# Create a temporary storage location.
+__shunit_tmpDir=`_shunit_mktempDir`
+
+# Provide a public temporary directory for unit test scripts.
+# TODO(kward): document this.
+SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp"
+if ! command mkdir "${SHUNIT_TMPDIR}"; then
+  _shunit_fatal "error creating SHUNIT_TMPDIR '${SHUNIT_TMPDIR}'"
+fi
+
+# Configure traps to clean up after ourselves.
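+# EXIT, INT and TERM all funnel into _shunit_cleanup(), whose __shunit_clean
+# flag guarantees the tearDown functions run at most once.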
+trap '_shunit_cleanup EXIT' 0 +trap '_shunit_cleanup INT' 2 +trap '_shunit_cleanup TERM' 15 + +# Create phantom functions to work around issues with Cygwin. +_shunit_mktempFunc +PATH="${__shunit_tmpDir}:${PATH}" + +# Make sure phantom functions are executable. This will bite if `/tmp` (or the +# current `$TMPDIR`) points to a path on a partition that was mounted with the +# 'noexec' option. The noexec command was created with `_shunit_mktempFunc()`. +noexec 2>/dev/null || _shunit_fatal \ + 'Please declare TMPDIR with path on partition with exec permission.' + +# We must manually source the tests in standalone mode. +if ${__SHUNIT_BUILTIN} [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then + # shellcheck disable=SC1090 + ${__SHUNIT_BUILTIN} . "`_shunit_prepForSourcing \"${__shunit_script}\"`" +fi + +# Configure default output coloring behavior. +_shunit_configureColor "${SHUNIT_COLOR}" + +__shunit_startSuiteTime=`${__SHUNIT_CMD_DATE_SECONDS}` + +# Execute the oneTimeSetUp function (if it exists). +if ! oneTimeSetUp; then + _shunit_fatal "oneTimeSetUp() returned non-zero return code." +fi + +# Command line selected tests or suite selected tests +if ${__SHUNIT_BUILTIN} [ "$#" -ge 2 ]; then + # Argument $1 is either the filename of tests or '--'; either way, skip it. + shift + # Remaining arguments ($2 .. $#) are assumed to be: + # - test function names. + # - configuration options, that is started with the `--` prefix. + # Interate through all remaining args in "$@" in a POSIX (likely portable) way. + # Helpful tip: https://unix.stackexchange.com/questions/314032/how-to-use-arguments-like-1-2-in-a-for-loop + for _shunit_arg_ do + case "${_shunit_arg_}" in + --output-junit-xml=*) + # It is a request for JUnit XML output. + __shunit_junitXmlOutputFile="${_shunit_arg_#--output-junit-xml=}" + ;; + --suite-name=*) + # It is a request for a custom suite name. + __shunit_suiteName="${_shunit_arg_#--suite-name=}" + ;; + --*) + _shunit_fatal "unrecognized option \"${_shunit_arg_}\"" + ;; + *) + # It is the test name, process it in a usual way. + suite_addTest "${_shunit_arg_}" + ;; + esac + done + unset _shunit_arg_ +else + # Execute the suite function defined in the parent test script. + # DEPRECATED as of 2.1.0. + suite +fi + +# If no tests or suite specified, dynamically build a list of functions. +if ${__SHUNIT_BUILTIN} [ -z "${__shunit_suite}" ]; then + shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"` + for shunit_func_ in ${shunit_funcs_}; do + suite_addTest "${shunit_func_}" + done +fi +unset shunit_func_ shunit_funcs_ + +# If suite name is not defined, dynamically generate it from the script name. +if ${__SHUNIT_BUILTIN} [ -z "${__shunit_suiteName}" ]; then + __shunit_suiteName="${__shunit_script##*/}" +fi + +# Prepare the suite name for XML output. +__shunit_xmlSuiteName="`_shunit_escapeXmlData "${__shunit_suiteName}"`" + +# Execute the suite of unit tests. +_shunit_execSuite + +# Execute the oneTimeTearDown function (if it exists). +if ! oneTimeTearDown; then + _shunit_fatal "oneTimeTearDown() returned non-zero return code." +fi + +__shunit_endSuiteTime=`${__SHUNIT_CMD_DATE_SECONDS}` + +# Generate a report summary. +_shunit_generateReport + +# That's it folks. +if ! ${__SHUNIT_BUILTIN} [ "${__shunit_testsFailed}" -eq 0 ]; then + return ${SHUNIT_FALSE} +fi
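
Quick ways to exercise the suites above (a sketch; the image tag is an
illustrative assumption, and the filename of the ownership/uid test script is
not visible in this hunk, so a placeholder is used; HOST_OWNER, HOST_UID and
REDIS_UID are likewise expected from the caller's environment):

    # Pure shell-function tests; self-contained, no Docker required.
    ./test/run-shell-func-tests.sh

    # Ownership and uid/gid tests; the image under test is read from
    # REDIS_IMG, and sudo is invoked internally for chown/cleanup steps.
    REDIS_IMG=redis:test TEST_VERBOSE=1 ./test/<ownership-tests>.sh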