diff --git a/.devops/cublas.Dockerfile b/.devops/cublas.Dockerfile
index ae3da75f5c7..1fae25635d7 100644
--- a/.devops/cublas.Dockerfile
+++ b/.devops/cublas.Dockerfile
@@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
 ARG CUDA_DOCKER_ARCH=all
 
 RUN apt-get update && \
-    apt-get install -y build-essential git cmake
+    apt-get install -y build-essential git cmake libsdl2-dev wget
 
 WORKDIR /app
 
@@ -21,8 +21,8 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable cuBLAS
-ENV WHISPER_CUBLAS=1
+ENV GGML_CUDA=1
 
-RUN make
+RUN make base.en
 
 ENTRYPOINT ["/app/main"]
diff --git a/.devops/main-cuda.Dockerfile b/.devops/main-cuda.Dockerfile
index bd9a43adb7a..c2bf0fbd1c6 100644
--- a/.devops/main-cuda.Dockerfile
+++ b/.devops/main-cuda.Dockerfile
@@ -1,6 +1,6 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=12.3.1
+ARG CUDA_VERSION=13.0.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 # Target the CUDA runtime image
@@ -13,28 +13,38 @@ WORKDIR /app
 ARG CUDA_DOCKER_ARCH=all
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable cuBLAS
-ENV WHISPER_CUBLAS=1
 
 RUN apt-get update && \
-    apt-get install -y build-essential \
+    apt-get install -y build-essential libsdl2-dev wget cmake git \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
 
 # Ref: https://stackoverflow.com/a/53464012
-ENV CUDA_MAIN_VERSION=12.3
+ENV CUDA_MAIN_VERSION=13.0
 ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
 
 COPY .. .
-RUN make
+# Enable cuBLAS
+RUN make base.en CMAKE_ARGS="-DGGML_CUDA=1 -DCMAKE_CUDA_ARCHITECTURES='75;80;86;90'"
+
+RUN find /app/build -name "*.o" -delete && \
+    find /app/build -name "*.a" -delete && \
+    rm -rf /app/build/CMakeFiles && \
+    rm -rf /app/build/cmake_install.cmake && \
+    rm -rf /app/build/_deps
 
 FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
-ENV CUDA_MAIN_VERSION=12.3
+ENV CUDA_MAIN_VERSION=13.0
 ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
 WORKDIR /app
 
 RUN apt-get update && \
-    apt-get install -y curl ffmpeg \
+    apt-get install -y curl ffmpeg wget cmake git \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
 
 COPY --from=build /app /app
+RUN du -sh /app/*
+RUN find /app -type f -size +100M
+ENV PATH=/app/build/bin:$PATH
 ENTRYPOINT [ "bash", "-c" ]
diff --git a/.devops/main-intel.Dockerfile b/.devops/main-intel.Dockerfile
new file mode 100644
index 00000000000..1b5859715d4
--- /dev/null
+++ b/.devops/main-intel.Dockerfile
@@ -0,0 +1,28 @@
+ARG ONEAPI_VERSION=2025.1.1-0-devel-ubuntu24.04
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build
+WORKDIR /app
+
+RUN apt-get update && \
+    apt-get install -y build-essential libsdl2-dev wget cmake git \
+    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY .. .
+# Enable SYCL
+ARG GGML_SYCL_F16=OFF
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" \
+        && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
+    fi && \
+    make base.en CMAKE_ARGS="-DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16}"
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
+WORKDIR /app
+
+RUN apt-get update && \
+    apt-get install -y curl ffmpeg libsdl2-dev wget cmake git \
+    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
+
+COPY --from=build /app /app
+ENV PATH=/app/build/bin:$PATH
+ENTRYPOINT [ "bash", "-c" ]
diff --git a/.devops/main-musa.Dockerfile b/.devops/main-musa.Dockerfile
new file mode 100644
index 00000000000..026791e3f89
--- /dev/null
+++ b/.devops/main-musa.Dockerfile
@@ -0,0 +1,40 @@
+ARG UBUNTU_VERSION=22.04
+# This needs to generally match the container host's environment.
+ARG MUSA_VERSION=rc4.2.0
+# Target the MUSA build image
+ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}-amd64
+# Target the MUSA runtime image
+ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64
+
+FROM ${BASE_MUSA_DEV_CONTAINER} AS build
+WORKDIR /app
+
+RUN apt-get update && \
+    apt-get install -y build-essential libsdl2-dev wget cmake git && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* /tmp/* /var/tmp/*
+
+COPY .. .
+# Enable muBLAS
+RUN make base.en CMAKE_ARGS="-DGGML_MUSA=1"
+
+RUN find /app/build -name "*.o" -delete && \
+    find /app/build -name "*.a" -delete && \
+    rm -rf /app/build/CMakeFiles && \
+    rm -rf /app/build/cmake_install.cmake && \
+    rm -rf /app/build/_deps
+
+FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
+WORKDIR /app
+
+RUN apt-get update && \
+    apt-get install -y curl ffmpeg wget cmake git && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* /tmp/* /var/tmp/*
+
+COPY --from=build /app/build/bin /app/build/bin
+COPY --from=build /app/samples /app/samples
+COPY --from=build /app/models /app/models
+
+ENV PATH=/app/build/bin:$PATH
+ENTRYPOINT [ "bash", "-c" ]
diff --git a/.devops/main.Dockerfile b/.devops/main.Dockerfile
index f923a9063d8..e1eb9b33700 100644
--- a/.devops/main.Dockerfile
+++ b/.devops/main.Dockerfile
@@ -2,18 +2,19 @@ FROM ubuntu:22.04 AS build
 WORKDIR /app
 
 RUN apt-get update && \
-    apt-get install -y build-essential \
+    apt-get install -y build-essential wget cmake git \
     && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
 
 COPY .. .
-RUN make +RUN make base.en FROM ubuntu:22.04 AS runtime WORKDIR /app RUN apt-get update && \ - apt-get install -y curl ffmpeg \ + apt-get install -y curl ffmpeg libsdl2-dev wget cmake git \ && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* COPY --from=build /app /app +ENV PATH=/app/build/bin:$PATH ENTRYPOINT [ "bash", "-c" ] diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000000..7c5e2438812 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +build*/ +.github/ +.devops/ \ No newline at end of file diff --git a/.github/workflows/bindings-go.yml b/.github/workflows/bindings-go.yml index 13f1950a5ba..ff420f2b636 100644 --- a/.github/workflows/bindings-go.yml +++ b/.github/workflows/bindings-go.yml @@ -10,13 +10,13 @@ on: - whisper.h jobs: - ubuntu-latest: - runs-on: ubuntu-latest + ubuntu-22: + runs-on: ubuntu-22.04 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v5 with: - go-version: '^1.19' - - uses: actions/checkout@v1 + go-version: '^1.23' + - uses: actions/checkout@v4 - run: | cd bindings/go make test diff --git a/.github/workflows/bindings-ruby.yml b/.github/workflows/bindings-ruby.yml index 902dfe6a2b5..680862fb764 100644 --- a/.github/workflows/bindings-ruby.yml +++ b/.github/workflows/bindings-ruby.yml @@ -1,22 +1,21 @@ name: Bindings Tests (Ruby) + on: push: - paths: - - bindings/ruby/** - - whisper.h + branches: + - master pull_request: - paths: - - bindings/ruby/** - - whisper.h + types: [opened, synchronize, reopened] jobs: - ubuntu-latest: - runs-on: ubuntu-latest + ubuntu-22: + runs-on: ubuntu-22.04 + defaults: + run: + working-directory: bindings/ruby steps: - uses: ruby/setup-ruby@v1 with: - ruby-version: '3.0' - - uses: actions/checkout@v1 - - run: | - cd bindings/ruby/ext - ruby extconf.rb && make + ruby-version: '3.2' + - uses: actions/checkout@v4 + - run: rake test diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2b51051848f..3643aaf279c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,17 +1,129 @@ name: CI -on: [push, pull_request] + +on: + push: + branches: + - master + tags: + - 'v*' + paths: ['.github/workflows/build.yml', + '**/CMakeLists.txt', + '**/Makefile', + '**/*.mk', + '**/*.cmake', + '**/*.in', + '**/*.h', + '**/*.hpp', + '**/*.c', + '**/*.cpp', + '**/*.cu', + '**/*.cuh', + '**/*.cl', + '**/*.swift', + '**/*.m', + '**/*.mm', + '**/*.metal', + '**/*.comp', + '**/*.java'] + + pull_request: + types: [opened, synchronize, reopened] + workflow_dispatch: + inputs: + create_release: + description: 'Create new release' + required: true + type: boolean + pre_release_tag: + description: 'Pre-release tag name' + required: false + type: string + run_type: + description: 'Workflow type to run' + required: true + type: choice + options: + - full-ci + - release-only + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: write # for creating release env: + BRANCH_NAME: ${{ github.head_ref || github.ref_name }} ubuntu_image: "ubuntu:22.04" + VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite" jobs: - ubuntu-latest: + determine-tag: runs-on: ubuntu-latest + outputs: + tag_name: ${{ steps.tag.outputs.name }} + should_release: ${{ steps.tag.outputs.should_release }} + + steps: + - name: Checkout with full history + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Determine tag name + id: tag + shell: bash + run: | + BUILD_NUMBER=$(git rev-list --count HEAD) + 
SHORT_HASH=$(git rev-parse --short=7 HEAD) + CUSTOM_TAG="${{ github.event.inputs.pre_release_tag }}" + SHOULD_RELEASE="false" + + echo "Raw values:" + echo "BUILD_NUMBER: $BUILD_NUMBER" + echo "SHORT_HASH: $SHORT_HASH" + echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}" + echo "CUSTOM_TAG: $CUSTOM_TAG" + + if [[ "${{ github.ref_type }}" == "tag" ]]; then + echo "Using pushed tag name" + TAG_NAME="${{ github.ref_name }}" + SHOULD_RELEASE="true" + elif [[ -n "$CUSTOM_TAG" ]]; then + echo "Using custom tag" + TAG_NAME="${CUSTOM_TAG}" + SHOULD_RELEASE="true" + elif [[ "${{ github.event.inputs.create_release }}" == "true" ]]; then + echo "Manual release requested" + SHOULD_RELEASE="true" + TAG_NAME="b${BUILD_NUMBER}" + elif [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then + echo "Using master branch format" + TAG_NAME="b${BUILD_NUMBER}" + SHOULD_RELEASE="false" + else + echo "Using non-master branch format" + SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-') + TAG_NAME="${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" + SHOULD_RELEASE="false" + fi + + echo "Final tag name: $TAG_NAME" + echo "Should release: $SHOULD_RELEASE" + echo "name=$TAG_NAME" >> $GITHUB_OUTPUT + echo "should_release=$SHOULD_RELEASE" >> $GITHUB_OUTPUT + + + ubuntu-22: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: - arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le] + arch: [linux/amd64, linux/ppc64le] steps: - name: Clone @@ -26,54 +138,188 @@ jobs: -v ${{ github.workspace }}:/workspace \ -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + apt update - apt install -y build-essential libsdl2-dev - make - make stream' + apt install -y build-essential libsdl2-dev cmake git + cmake -B build + cmake --build build --config Release -j $(nproc)' + + ubuntu-22-arm64: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + arch: [linux/arm64] + + steps: + - name: Clone + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Build ${{ matrix.arch }} + run: | + docker run --platform ${{ matrix.arch }} --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' + set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt-get update + apt-get install -y ca-certificates + sed -i "s|http://ports.ubuntu.com|https://mirror.kumi.systems|g" /etc/apt/sources.list + + apt update + apt install -y build-essential libsdl2-dev cmake git + cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a + cmake --build build --config Release -j $(nproc)' + + ubuntu-22-arm-v7: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + arch: [linux/arm/v7] + + steps: + - name: Clone + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Build ${{ matrix.arch }} + run: | + 
docker run --platform ${{ matrix.arch }} --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' + set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt-get update + apt-get install -y ca-certificates + sed -i "s|http://ports.ubuntu.com|https://mirror.kumi.systems|g" /etc/apt/sources.list + + apt update + apt install -y build-essential libsdl2-dev cmake git + cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp + cmake --build build --config Release -j $(nproc)' macOS-latest: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: macOS-latest + strategy: + matrix: + destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS'] + steps: - name: Clone + id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-swift + evict-old-files: 1d + - name: Dependencies run: | brew update + cmake --version brew install sdl2 - name: Build run: | - make - make stream + sysctl -a + cmake -B build -G Xcode \ + -DGGML_METAL_USE_BF16=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON \ + -DWHISPER_BUILD_EXAMPLES=OFF \ + -DWHISPER_BUILD_TESTS=OFF \ + -DWHISPER_BUILD_SERVER=OFF \ + -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) + + +# freeBSD-latest: +# runs-on: macos-13 +# +# steps: +# - name: Clone +# uses: actions/checkout@v4 +# +# - name: Build +# uses: cross-platform-actions/action@v0.27.0 +# with: +# operating_system: freebsd +# version: '14.2' +# run: | +# sudo pkg update +# sudo pkg install -y gmake sdl2 cmake git +# cmake -B build +# cmake --build build --config Release + + ubuntu-22-gcc: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 - freeBSD-latest: - runs-on: macos-12 + strategy: + fail-fast: false + matrix: + build: [Debug, Release] + arch: [linux/amd64, linux/ppc64le] steps: - name: Clone uses: actions/checkout@v4 - - name: Build - uses: cross-platform-actions/action@v0.24.0 - with: - operating_system: freebsd - version: '13.2' - run: | - sudo pkg update - sudo pkg install -y gmake sdl2 - gmake - gmake stream + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 - ubuntu-latest-gcc: - runs-on: ubuntu-latest + - name: Build ${{ matrix.arch }} + run: | + docker run --platform ${{ matrix.arch }} --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' + set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt update + apt install -y build-essential cmake libsdl2-dev git + cmake . 
-DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} + make + ctest -L gh --output-on-failure' + + ubuntu-22-gcc-arm64: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: build: [Debug, Release] - arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le] + arch: [linux/arm64] steps: - name: Clone @@ -88,20 +334,71 @@ jobs: -v ${{ github.workspace }}:/workspace \ -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt-get update + apt-get install -y ca-certificates + sed -i "s|http://ports.ubuntu.com|https://mirror.kumi.systems|g" /etc/apt/sources.list + apt update - apt install -y build-essential cmake libsdl2-dev - cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} + apt install -y build-essential cmake libsdl2-dev git + cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a make ctest -L gh --output-on-failure' - ubuntu-latest-clang: - runs-on: ubuntu-latest + ubuntu-22-gcc-arm-v7: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: build: [Debug, Release] - arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le] + arch: [linux/arm/v7] + + steps: + - name: Clone + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Build ${{ matrix.arch }} + run: | + docker run --platform ${{ matrix.arch }} --rm \ + -v ${{ github.workspace }}:/workspace \ + -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' + set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt-get update + apt-get install -y ca-certificates + sed -i "s|http://ports.ubuntu.com|https://mirror.kumi.systems|g" /etc/apt/sources.list + + apt update + apt install -y build-essential cmake libsdl2-dev git + cmake . 
-DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp + make + ctest -L gh --output-on-failure' + + ubuntu-22-clang: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 + + strategy: + fail-fast: false + matrix: + build: [Debug, Release] + #arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le] + # TODO: arm/v7 disabled due to clang bug + # https://github.com/ggerganov/whisper.cpp/actions/runs/9657764109/job/26637633042?pr=2256#step:4:1990 + arch: [linux/amd64, linux/arm64, linux/ppc64le] steps: - name: Clone @@ -116,14 +413,24 @@ jobs: -v ${{ github.workspace }}:/workspace \ -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + + apt-get update + apt-get install -y ca-certificates + sed -i "s|http://ports.ubuntu.com|https://mirror.kumi.systems|g" /etc/apt/sources.list + apt update - apt install -y clang build-essential cmake libsdl2-dev + apt install -y clang build-essential cmake libsdl2-dev git cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang make ctest -L gh --output-on-failure' - ubuntu-latest-gcc-sanitized: - runs-on: ubuntu-latest + ubuntu-22-gcc-sanitized: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -144,13 +451,21 @@ jobs: -v ${{ github.workspace }}:/workspace \ -w /workspace ${{ env.ubuntu_image }} /bin/sh -c ' set -e + export DEBIAN_FRONTEND=noninteractive + sed -i "s|archive.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + sed -i "s|security.ubuntu.com|mirrors.kernel.org|g" /etc/apt/sources.list + apt update - apt install -y build-essential cmake - cmake . -DCMAKE_BUILD_TYPE=Debug -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON + apt install -y build-essential cmake git + cmake . -DCMAKE_BUILD_TYPE=Debug \ + -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON \ + -DGGML_OPENMP=OFF make ctest -L gh --output-on-failure' ubuntu-22-cmake-sycl: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: ubuntu-22.04 strategy: @@ -180,12 +495,12 @@ jobs: shell: bash run: | sudo apt update - sudo apt install intel-oneapi-compiler-dpcpp-cpp + sudo apt install intel-oneapi-compiler-dpcpp-cpp git - name: install oneAPI MKL library shell: bash run: | - sudo apt install intel-oneapi-mkl-devel + sudo apt install intel-oneapi-mkl-devel git - name: Clone id: checkout @@ -197,10 +512,12 @@ jobs: source /opt/intel/oneapi/setvars.sh mkdir build cd build - cmake -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. + cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. cmake --build . 
--config Release -j $(nproc) ubuntu-22-cmake-sycl-fp16: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: ubuntu-22.04 strategy: @@ -230,7 +547,7 @@ jobs: shell: bash run: | sudo apt update - sudo apt install intel-oneapi-compiler-dpcpp-cpp + sudo apt install intel-oneapi-compiler-dpcpp-cpp git - name: install oneAPI MKL library shell: bash @@ -247,10 +564,12 @@ jobs: source /opt/intel/oneapi/setvars.sh mkdir build cd build - cmake -DWHISPER_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. + cmake -DGGML_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. cmake --build . --config Release -j $(nproc) windows-msys2: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: windows-latest strategy: @@ -271,30 +590,16 @@ jobs: msystem: ${{matrix.sys}} install: >- base-devel + git mingw-w64-${{matrix.env}}-toolchain mingw-w64-${{matrix.env}}-cmake mingw-w64-${{matrix.env}}-SDL2 mingw-w64-${{matrix.env}}-openblas - - name: Build using make - shell: msys2 {0} - run: | - make -j $(nproc) - - - name: Clean after building using make - shell: msys2 {0} - run: | - make clean - - - name: Build using make w/ OpenBLAS - shell: msys2 {0} - run: | - make WHISPER_OPENBLAS=1 -j $(nproc) - - name: Build using CMake shell: msys2 {0} run: | - cmake -B build + cmake -B build -DWHISPER_SDL2=ON cmake --build build --config ${{ matrix.build }} -j $(nproc) - name: Clean after building using CMake @@ -305,11 +610,14 @@ jobs: - name: Build using CMake w/ OpenBLAS shell: msys2 {0} run: | - cmake -B build -DWHISPER_OPENBLAS=ON + cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS cmake --build build --config ${{ matrix.build }} -j $(nproc) windows: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: windows-latest + needs: determine-tag strategy: matrix: @@ -344,6 +652,7 @@ jobs: run: > cmake -S . 
-B ./build -A ${{ matrix.arch }} -DCMAKE_BUILD_TYPE=${{ matrix.build }} + -DBUILD_SHARED_LIBS=ON -DWHISPER_SDL2=${{ matrix.sdl2 }} - name: Build @@ -355,20 +664,52 @@ jobs: if: matrix.sdl2 == 'ON' run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }} - - name: Upload dll + - name: Upload SDL2.dll + if: matrix.sdl2 == 'ON' + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.s2arc }}_SDL2.dll + path: build/bin/${{ matrix.build }}/SDL2.dll + + - name: Upload whisper dll uses: actions/upload-artifact@v4 with: - name: ${{ matrix.jnaPath }}_whisper.dll + name: whisper_${{ matrix.arch }}.dll path: build/bin/${{ matrix.build }}/whisper.dll + - name: Upload ggml dll + uses: actions/upload-artifact@v4 + with: + name: ggml_${{ matrix.arch }}.dll + path: build/bin/${{ matrix.build }}/ggml.dll + + - name: Upload ggml base dll + uses: actions/upload-artifact@v4 + with: + name: ggml_base_${{ matrix.arch }}.dll + path: build/bin/${{ matrix.build }}/ggml-base.dll + + - name: Upload ggml cpu dll + uses: actions/upload-artifact@v4 + with: + name: ggml_cpu_${{ matrix.arch }}.dll + path: build/bin/${{ matrix.build }}/ggml-cpu.dll + + - name: Pack bin artifacts + shell: pwsh + run: | + Compress-Archive -Path "build/bin/${{ matrix.build }}" -DestinationPath "whisper-bin-${{ matrix.arch }}.zip" + - name: Upload binaries - if: matrix.sdl2 == 'ON' + if: matrix.sdl2 == 'ON' && ${{ needs.determine-tag.outputs.should_release }} uses: actions/upload-artifact@v4 with: - name: whisper-bin-${{ matrix.arch }} - path: build/bin/${{ matrix.build }} + name: whisper-bin-${{ matrix.arch }}.zip + path: whisper-bin-${{ matrix.arch }}.zip windows-blas: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: windows-latest strategy: @@ -377,16 +718,14 @@ jobs: arch: [Win32, x64] blas: [ON] sdl2: [ON] + blasver: [0.3.29] include: - arch: Win32 - obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x86.zip s2arc: x86 - clblast: OFF + blasfile: x86 - arch: x64 - obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x64.zip s2arc: x64 - clblast: ON - clver: 1.6.1 + blasfile: x64_64 - sdl2: ON s2ver: 2.28.5 @@ -394,17 +733,22 @@ jobs: - name: Clone uses: actions/checkout@v4 + - name: Export GitHub Actions cache environment variables + uses: actions/github-script@v7 + with: + script: | + core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || ''); + core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || ''); + - name: Add msbuild to PATH uses: microsoft/setup-msbuild@v2 - - name: Fetch OpenBLAS + - name: Install OpenBLAS and pkgconfiglite if: matrix.blas == 'ON' run: | - C:/msys64/usr/bin/wget.exe -qO blas.zip ${{ matrix.obzip }} - 7z x blas.zip -oblas -y - copy blas/include/cblas.h . - copy blas/include/openblas_config.h . 
- echo "OPENBLAS_PATH=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV + Invoke-WebRequest "https://github.com/OpenMathLib/OpenBLAS/releases/download/v$%7B%7Bmatrix.blasver%7D%7D/OpenBLAS-$%7B%7Bmatrix.blasver%7D%7D_$%7B%7Bmatrix.blasfile%7D%7D.zip" -OutFile "OpenBLAS-${{matrix.blasver}}.zip" + Expand-Archive "OpenBLAS-${{matrix.blasver}}.zip" -DestinationPath "OpenBLAS-${{matrix.blasver}}" + choco install pkgconfiglite - name: Fetch SDL2 and set SDL2_DIR if: matrix.sdl2 == 'ON' @@ -413,119 +757,255 @@ jobs: 7z x sdl2.zip echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV - - name: Install OpenCL - if: matrix.clblast == 'ON' - run: vcpkg.exe --triplet=${{ matrix.arch }}-windows install opencl - - - name: Fetch CLBlast and set CLBlast_DIR - if: matrix.clblast == 'ON' - run: | - C:/msys64/usr/bin/wget.exe -qO clblast.zip https://github.com/CNugteren/CLBlast/releases/download/${{ matrix.clver }}/CLBlast-${{ matrix.clver }}-windows-x64.zip - 7z x clblast.zip - 7z x CLBlast-${{ matrix.clver }}-windows-x64.7z - echo "CLBlast_DIR=$env:GITHUB_WORKSPACE/CLBlast-${{ matrix.clver }}-windows-x64/lib/cmake/CLBlast" >> $env:GITHUB_ENV - - name: Configure run: > cmake -S . -B ./build -A ${{ matrix.arch }} + -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake" -DCMAKE_BUILD_TYPE=${{ matrix.build }} - -DWHISPER_OPENBLAS=${{ matrix.blas }} - -DCMAKE_LIBRARY_PATH="$env:OPENBLAS_PATH/lib" + -DGGML_BLAS=${{ matrix.blas }} + -DGGML_BLAS_VENDOR=OpenBLAS + -DBLAS_LIBRARIES="$env:GITHUB_WORKSPACE/OpenBLAS-${{matrix.blasver}}/lib/libopenblas.lib" + -DBLAS_INCLUDE_DIRS="$env:GITHUB_WORKSPACE/OpenBLAS-${{matrix.blasver}}/include" -DWHISPER_SDL2=${{ matrix.sdl2 }} - -DWHISPER_CLBLAST=${{ matrix.clblast }} - name: Build run: | cd ./build msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }} - - name: Copy libopenblas.dll + - name: Copy openblas.dll if: matrix.blas == 'ON' - run: copy "$env:OPENBLAS_PATH/bin/libopenblas.dll" build/bin/${{ matrix.build }} + run: copy "$env:GITHUB_WORKSPACE/OpenBLAS-${{matrix.blasver}}/bin/libopenblas.dll" build/bin/${{ matrix.build }} - name: Copy SDL2.dll if: matrix.sdl2 == 'ON' run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }} - - name: Copy clblast.dll - if: matrix.clblast == 'ON' - run: copy "$env:CLBlast_DIR/../../clblast.dll" build/bin/${{ matrix.build }} + - name: Pack bin artifacts + shell: pwsh + run: | + Compress-Archive -Path "build/bin/${{ matrix.build }}" -DestinationPath "whisper-blas-bin-${{ matrix.arch }}.zip" - name: Upload binaries - if: matrix.blas == 'ON' && matrix.sdl2 == 'ON' + if: matrix.blas == 'ON' && matrix.sdl2 == 'ON' && ${{ needs.determine-tag.outputs.should_release }} uses: actions/upload-artifact@v4 with: - name: whisper-blas${{ matrix.clblast == 'ON' && '-clblast' || ''}}-bin-${{ matrix.arch }} - path: build/bin/${{ matrix.build }} + name: whisper-blas-bin-${{ matrix.arch }}.zip + path: whisper-blas-bin-${{ matrix.arch }}.zip windows-cublas: - runs-on: windows-2019 - + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: windows-2022 + needs: determine-tag strategy: + fail-fast: false matrix: build: [Release] arch: [x64] cublas: [ON] sdl2: [ON] - cuda-toolkit: [12.2.0, 11.8.0] + cuda-toolkit: [12.4.0, 11.8.0] include: - arch: x64 - s2arc: x64 - - sdl2: ON - s2ver: 2.28.5 - + sdl2: ON + sdl2_ver: 2.28.5 steps: 
-      - name: Clone
+      - name: Clone repository
        uses: actions/checkout@v4
 
+      - name: Install Ninja
+        id: install_ninja
+        run: |
+          choco install ninja
+
+      - name: Install ccache
+        uses: hendrikmuhs/ccache-action@v1.2.16
+        with:
+          key: ${{ github.job }}-${{ matrix.cuda-toolkit }}-${{ matrix.build }}
+          variant: sccache
+          evict-old-files: 5d
+
+      - name: Install Cuda Toolkit 11.8.0
+        if: ${{ matrix.cuda-toolkit == '11.8.0' }}
+        run: |
+          $CUDA_VERSION = ${{ matrix.cuda-toolkit }}
+          $CUDA_TOOLKIT_DIR = "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$CUDA_VERSION"
+          $CUDA_DOWNLOAD = "https://developer.download.nvidia.com/compute/cuda/redist"
+
+          # Components versions
+          $CUDART_VER = "11.8.89"
+          $NVCC_VER = "11.8.89"
+          $NVRTC_VER = "11.8.89"
+          $CUBLAS_VER = "11.8.1.74"
+          $NVTX_VER = "11.8.86"
+          $VS_VER = "11.8.86"
+          $NVPROF_VER = "11.8.87"
+          $CCCL_VER = "11.8.89"
+
+          # Create the directory where the CUDA Toolkit will be installed
+          mkdir -p $CUDA_TOOLKIT_DIR
+
+          # Install unzip to extract the downloaded files
+          choco install unzip -y
+
+          # Download all the required components
+          curl -O "$CUDA_DOWNLOAD/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-${CUDART_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-${NVCC_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-${NVRTC_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/libcublas/windows-x86_64/libcublas-windows-x86_64-${CUBLAS_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-${NVTX_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-${VS_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-${NVPROF_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-${CCCL_VER}-archive.zip"
+
+          # Extract all the downloaded files to the CUDA Toolkit directory
+          unzip '*.zip' -d $CUDA_TOOLKIT_DIR
+
+          # Copy all the extracted files to the main CUDA Toolkit directory
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_cudart-windows-x86_64-${CUDART_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvcc-windows-x86_64-${NVCC_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvrtc-windows-x86_64-${NVRTC_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\libcublas-windows-x86_64-${CUBLAS_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvtx-windows-x86_64-${NVTX_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvprof-windows-x86_64-${NVPROF_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_cccl-windows-x86_64-${CCCL_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\visual_studio_integration-windows-x86_64-${VS_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+
+          # Visual Studio integration
+          xcopy "$CUDA_TOOLKIT_DIR\visual_studio_integration-windows-x86_64-${VS_VER}-archive\visual_studio_integration\MSBuildExtensions\*" "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\MSBuild\Microsoft\VC\v170\BuildCustomizations" /E /I /H /Y
+
+          # Set environment variables
+          echo "$CUDA_TOOLKIT_DIR\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "$CUDA_TOOLKIT_DIR\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "CUDA_PATH=$CUDA_TOOLKIT_DIR" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+          echo "CUDA_PATH_V11_8=$CUDA_TOOLKIT_DIR" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+
+      - name: Install Cuda Toolkit 12.4.0
+        if: ${{ matrix.cuda-toolkit == '12.4.0' }}
+        run: |
+          $CUDA_VERSION = ${{ matrix.cuda-toolkit }}
+          $CUDA_TOOLKIT_DIR = "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$CUDA_VERSION"
+          $CUDA_DOWNLOAD = "https://developer.download.nvidia.com/compute/cuda/redist"
+
+          # Components versions
+          $CUDART_VER = "12.4.127"
+          $NVCC_VER = "12.4.131"
+          $NVRTC_VER = "12.4.127"
+          $CUBLAS_VER = "12.4.5.8"
+          $NVTX_VER = "12.4.127"
+          $PROFILER_VER = "12.4.127"
+          $VS_VER = "12.4.127"
+          $NVPROF_VER = "12.4.128"
+          $CCCL_VER = "12.4.127"
+
+          # Create the directory where the CUDA Toolkit will be installed
+          mkdir -p $CUDA_TOOLKIT_DIR
+
+          # Install unzip to extract the downloaded files
+          choco install unzip -y
+
+          # Download all the required components
+          curl -O "$CUDA_DOWNLOAD/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-${CUDART_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-${NVCC_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-${NVRTC_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/libcublas/windows-x86_64/libcublas-windows-x86_64-${CUBLAS_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-${NVTX_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-${PROFILER_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-${VS_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-${NVPROF_VER}-archive.zip"
+          curl -O "$CUDA_DOWNLOAD/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-${CCCL_VER}-archive.zip"
+
+          # Extract all the downloaded files to the CUDA Toolkit directory
+          unzip -q '*.zip' -d $CUDA_TOOLKIT_DIR
+
+          # Copy all the extracted files to the main CUDA Toolkit directory
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_cudart-windows-x86_64-${CUDART_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvcc-windows-x86_64-${NVCC_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvrtc-windows-x86_64-${NVRTC_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\libcublas-windows-x86_64-${CUBLAS_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvtx-windows-x86_64-${NVTX_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_nvprof-windows-x86_64-${NVPROF_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_cccl-windows-x86_64-${CCCL_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\cuda_profiler_api-windows-x86_64-${PROFILER_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+          xcopy "$CUDA_TOOLKIT_DIR\visual_studio_integration-windows-x86_64-${VS_VER}-archive\*" "$CUDA_TOOLKIT_DIR" /E /I /H /Y
+
+          # Visual Studio integration
+          xcopy "$CUDA_TOOLKIT_DIR\visual_studio_integration-windows-x86_64-${VS_VER}-archive\visual_studio_integration\MSBuildExtensions\*" "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\MSBuild\Microsoft\VC\v170\BuildCustomizations" /E /I /H /Y
+
+          # Set environment variables
+          echo "$CUDA_TOOLKIT_DIR\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "$CUDA_TOOLKIT_DIR\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+          echo "CUDA_PATH=$CUDA_TOOLKIT_DIR" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+          echo "CUDA_PATH_V12_4=$CUDA_TOOLKIT_DIR" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+
       - name: Add msbuild to PATH
         uses: microsoft/setup-msbuild@v2
 
-      - name: Install CUDA Toolkit
-        id: cuda-toolkit
-        uses: Jimver/cuda-toolkit@v0.2.15
-        with:
-          cuda: '${{ matrix.cuda-toolkit }}'
+      - name: Install 7-Zip
+        run: choco install 7zip -y
 
       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'
         run: |
-          C:/msys64/usr/bin/wget.exe -qO sdl2.zip https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.s2ver }}/SDL2-devel-${{ matrix.s2ver }}-VC.zip
+          Invoke-WebRequest -Uri https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.sdl2_ver }}/SDL2-devel-${{ matrix.sdl2_ver }}-VC.zip -OutFile sdl2.zip
           7z x sdl2.zip
-          echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV
+          echo "SDL2_DIR=${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" | Out-File -FilePath $env:GITHUB_ENV -Append
+          echo "${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" > SDL2_PATH.txt
 
-      - name: Configure
-        run: >
-          cmake -S . -B ./build -A ${{ matrix.arch }}
-          -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_CUDA=${{ matrix.cublas }}
-          -DWHISPER_SDL2=${{ matrix.sdl2 }}
+      - name: Install cmake
+        run: choco install cmake
 
-      - name: Build ${{ matrix.cuda-toolkit }}
+      - name: Build Project
+        shell: cmd
         run: |
-          cd ./build
-          cmake --build . --config ${{ matrix.build }}
+          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+          cmake --version
+          where cmake
+          if "${{ matrix.cuda-toolkit }}" == "11.8.0" (
+            set CUDA_FLAGS=-allow-unsupported-compiler -D_ALLOW_COMPILER_AND_STL_VERSION_MISMATCH -D_DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR
+          ) else (
+            set CUDA_FLAGS=
+          )
+          cmake -S . -B build -G "Ninja Multi-Config" ^
+            -DCMAKE_BUILD_TYPE=${{ matrix.build }} ^
+            -DGGML_CUDA=${{ matrix.cublas }} ^
+            -DWHISPER_SDL2=${{ matrix.sdl2 }} ^
+            -DSDL2_DIR="%SDL2_DIR%" ^
+            -DCMAKE_POLICY_VERSION_MINIMUM=3.5 ^
+            -DCMAKE_CUDA_FLAGS="%CUDA_FLAGS%"
+          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
+          cmake --build build --config ${{ matrix.build }} -j %NUMBER_OF_PROCESSORS%
+
+      - name: Check sccache status after build
+        run: |
+          sccache --show-stats
 
       - name: Copy CUDA DLLs
-        run: >
-          Copy-Item -PassThru
-          -Path "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/*.dll"
-          -Include cudart64_*,cublas64_*,cublasLt64_*
-          -Destination build/bin/${{ matrix.build }}
+        run: |
+          Get-ChildItem "$env:CUDA_PATH\bin\" -Filter "*.dll" |
+          Copy-Item -Destination "build/bin/${{ matrix.build }}"
 
       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'
-        run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
+        run: copy "$env:SDL2_DIR/../lib/${{ matrix.arch }}/SDL2.dll" build/bin/${{ matrix.build }}
+
+      - name: Pack bin artifacts
+        shell: pwsh
+        run: |
+          Compress-Archive -Path "build/bin/${{ matrix.build }}" -DestinationPath "whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}.zip"
 
       - name: Upload binaries
-        if: matrix.sdl2 == 'ON'
+        if: ${{ needs.determine-tag.outputs.should_release == 'true' }}
         uses: actions/upload-artifact@v4
         with:
-          name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
-          path: build/bin/${{ matrix.build }}
+          name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}.zip
+          path: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}.zip
 
   emscripten:
-    runs-on: ubuntu-latest
+    if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' ||
+        github.event.inputs.run_type == 'full-ci' }}
+    runs-on: ubuntu-22.04
 
     strategy:
       matrix:
@@ -546,15 +1026,16 @@ jobs:
           emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
           make
 
-  ios:
+  ios-xcode-build:
     runs-on: macos-latest
+    needs: determine-tag
 
     strategy:
       matrix:
        build: [Release]
 
     steps:
-      - name: Clone
+      - name: Checkout code
        uses: actions/checkout@v4
 
       - name: Configure
         run: |
           cp models/for-tests-ggml-base.en.bin models/ggml-base.en.bin
           mkdir models/ggml-base.en-encoder.mlmodelc
 
+      - name: Build
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -G Xcode .. \
+            -DGGML_METAL_USE_BF16=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
+            -DWHISPER_BUILD_EXAMPLES=OFF \
+            -DWHISPER_BUILD_TESTS=OFF \
+            -DWHISPER_BUILD_SERVER=OFF \
+            -DCMAKE_SYSTEM_NAME=iOS \
+            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
+            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
+
+      - name: xcodebuild for swift package
+        id: xcodebuild
+        run: |
+          ./build-xcframework.sh
+
       - name: Build objc example
-        run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphonesimulator build
+        run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO FRAMEWORK_FOLDER_PATH=./build-ios build
 
       - name: Build swiftui example
-        run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphonesimulator build
+        run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build
+
+      - name: Pack artifacts
+        id: pack_artifacts
+        run: |
+          zip --symlinks -r whisper-${{ needs.determine-tag.outputs.tag_name }}-xcframework.zip build-apple/whisper.xcframework
+
+      - name: Upload artifacts
+        if: ${{ needs.determine-tag.outputs.should_release == 'true' }}
+        uses: actions/upload-artifact@v4
+        with:
+          path: whisper-${{ needs.determine-tag.outputs.tag_name }}-xcframework.zip
+          name: whisper-${{ needs.determine-tag.outputs.tag_name }}-xcframework.zip
 
   android:
-    runs-on: ubuntu-latest
+    if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' ||
+        github.event.inputs.run_type == 'full-ci' }}
+    runs-on: ubuntu-22.04
 
     steps:
       - name: Clone
        uses: actions/checkout@v4
        with:
          path: whisper
 
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          repository: ggerganov/ggml
-          path: ggml
-
       - name: Install Java
        uses: actions/setup-java@v4
        with:
@@ -601,10 +1112,10 @@
         run: |
           export PATH_TO_GGML=$PWD/ggml
           cd whisper/examples/whisper.android
-          ./gradlew assembleRelease --no-daemon -PGGML_HOME=$PATH_TO_GGML
+          ./gradlew assembleRelease --no-daemon
 
   android_java:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
 
     steps:
       - name: Clone
@@ -628,8 +1139,10 @@
         chmod +x ./gradlew
         ./gradlew assembleRelease
 
-  java:
-    needs: [ 'windows' ]
+  bindings-java:
+    if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' ||
+        github.event.inputs.run_type == 'full-ci' }}
+    needs: ['windows']
     runs-on: windows-latest
     steps:
       - uses: actions/checkout@v4
@@ -640,46 +1153,428 @@
           distribution: zulu
           java-version: 20
 
-      - name: Download Windows lib
+      - name: Download Whisper Windows lib
+        uses: actions/download-artifact@v4
+        with:
+          name: whisper_x64.dll
+
+      - name: Download GGML Windows lib
+        uses: actions/download-artifact@v4
+        with:
+          name: ggml_x64.dll
+
+      - name: Download GGML Base Windows lib
+        uses: actions/download-artifact@v4
+        with:
+          name: ggml_base_x64.dll
+
+      - name: Download GGML CPU Windows lib
+        uses: actions/download-artifact@v4
+        with:
+          name: ggml_cpu_x64.dll
+
+      - name: Download SDL2.dll
         uses: actions/download-artifact@v4
         with:
-          name: win32-x86-64_whisper.dll
-          path: bindings/java/build/generated/resources/main/win32-x86-64
+          name: x64_SDL2.dll
+
+      - name: List downloaded files
+        shell: pwsh
+        run: |
+          Get-ChildItem -Path "." 
-Recurse -Filter "*.dll" + + - name: Move DLL to correct location + shell: pwsh + run: | + New-Item -Path "build\bin\Release" -ItemType Directory -Force + + Copy-Item -Path "whisper.dll" -Destination "build\bin\Release\whisper.dll" -Force + Write-Host "Copied whisper.dll to build\bin\Release\whisper.dll directory" + + Copy-Item -Path "ggml.dll" -Destination "build\bin\Release\ggml.dll" -Force + Write-Host "Copied ggml.dll to build\bin\Release\ggml.dll directory" + + Copy-Item -Path "ggml-base.dll" -Destination "build\bin\Release\ggml-base.dll" -Force + Write-Host "Copied ggml-base.dll to build\bin\Release\ggml-base.dll directory" + + Copy-Item -Path "ggml-cpu.dll" -Destination "build\bin\Release\ggml-cpu.dll" -Force + Write-Host "Copied ggml-cpu.dll to build\bin\Release\ggml-cpu.dll directory" + + Copy-Item -Path "SDL2.dll" -Destination "build\bin\Release\SDL2.dll" -Force + Write-Host "Copied SDL2.dll to build\bin\Release\SDL2.dll directory" + + - name: List build release files + shell: pwsh + run: | + Get-ChildItem -Path "build\Release" -Recurse -Filter "*.dll" - name: Build run: | - models\download-ggml-model.cmd tiny.en + models\download-ggml-model.cmd tiny.en models/ cd bindings/java chmod +x ./gradlew - ./gradlew build + ./gradlew build --info + + - name: Pack jar artifacts + shell: pwsh + run: | + Compress-Archive -Path "bindings/java/build/libs/whispercpp-*.jar" -DestinationPath "whispercpp.jar.zip" - name: Upload jar uses: actions/upload-artifact@v4 with: - name: whispercpp.jar - path: bindings/java/build/libs/whispercpp-*.jar + name: whispercpp.jar.zip + path: whispercpp.jar.zip + +# - name: Publish package +# if: ${{ github.ref == 'refs/heads/master' }} +# uses: gradle/gradle-build-action@v2.4.2 +# with: +# arguments: publish +# build-root-directory: bindings/java +# env: +# MAVEN_USERNAME: ${{ secrets.JIRA_USER }} +# MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }} +# PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }} +# PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + + quantize: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} + runs-on: ubuntu-22.04 + + steps: + - name: Clone + uses: actions/checkout@v4 + + - name: Test quantize + run: | + ./models/download-ggml-model.sh tiny.en + cmake -B build + cmake --build build --config Release + ./build/bin/quantize models/ggml-tiny.en.bin models/ggml-tiny.en-q4_0.bin q4_0 + + release: + if: ${{ github.event.inputs.create_release == 'true' || github.event.inputs.pre_release_tag != '' || startsWith(github.ref, 'refs/tags/v') }} + + runs-on: ubuntu-latest + + needs: + - determine-tag + - ios-xcode-build + - windows + - windows-blas + - windows-cublas + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 - - name: Publish package - if: ${{ github.ref == 'refs/heads/master' }} - uses: gradle/gradle-build-action@v2.4.2 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: release + evict-old-files: 1d + + # Downloads all the artifacts from the previous jobs + - name: Download artifacts + id: download-artifact + uses: actions/download-artifact@v4 with: - arguments: publish - build-root-directory: bindings/java + path: ./artifact + + - name: Move artifacts + id: move_artifacts + run: mkdir -p ./artifact/release && mv ./artifact/*/*.zip ./artifact/release + + - name: Create release + id: create_release + uses: ggml-org/action-create-release@v1 env: - MAVEN_USERNAME: ${{ secrets.JIRA_USER }} - MAVEN_PASSWORD: ${{ 
secrets.JIRA_PASS }} - PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }} - PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ needs.determine-tag.outputs.tag_name }} + prerelease: ${{ github.event.inputs.pre_release_tag != '' }} + draft: true - quantize: + - name: Upload release + id: upload_release + uses: actions/github-script@v3 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const path = require('path'); + const fs = require('fs'); + const release_id = '${{ steps.create_release.outputs.id }}'; + for (let file of await fs.readdirSync('./artifact/release')) { + if (path.extname(file) === '.zip') { + console.log('uploadReleaseAsset', file); + await github.repos.uploadReleaseAsset({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release_id, + name: file, + data: await fs.readFileSync(`./artifact/release/${file}`) + }); + } + } + + coreml-base-en: + if: ${{ (github.event_name == 'push' && github.ref == 'refs/heads/master') || + github.event.inputs.create_release == 'true' || + github.event.inputs.pre_release_tag != '' || + startsWith(github.ref, 'refs/tags/v') }} + runs-on: macos-latest + needs: determine-tag + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set environment variables + id: set_vars + run: | + echo "MODEL_NAME=base.en" >> $GITHUB_ENV + echo "GEN_MODEL_NAME=whisper-${{ needs.determine-tag.outputs.tag_name }}-ggml-base.en-encoder.mlmodelc" >> $GITHUB_ENV + + - name: Download model + run: | + ./models/download-ggml-model.sh ${{ env.MODEL_NAME }} + + - name: Generate CoreML model + run: | + python3.11 -m venv venv + source venv/bin/activate + pip install ane_transformers openai-whisper coremltools + ./models/generate-coreml-model.sh ${{ env.MODEL_NAME }} + + vad: + if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' || + github.event.inputs.run_type == 'full-ci' }} runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build + shell: bash + run: | + cmake -B build + cmake --build build --config Release + + - name: Test + shell: bash + run: | + ctest -R ^test-vad$ --test-dir build --output-on-failure -VV + +# TODO: simplify the following workflows using a matrix + ggml-ci-x64-cpu-low-perf: + runs-on: ubuntu-22.04 + steps: - name: Clone + id: checkout uses: actions/checkout@v4 - - name: Test quantize + - name: ccache + uses: ggml-org/ccache-action@v1.2.16 + with: + key: ggml-ci-x64-cpu-low-perf + evict-old-files: 1d + + - name: Dependencies + id: depends run: | - ./models/download-ggml-model.sh tiny.en - make quantize - ./quantize models/ggml-tiny.en.bin models/ggml-tiny.en-q4_0.bin q4_0 + sudo apt-get update + sudo apt-get install build-essential libcurl4-openssl-dev + + - name: Test + id: ggml-ci + run: | + LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + + ggml-ci-arm64-cpu-low-perf: + runs-on: ubuntu-22.04-arm + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: ccache + uses: ggml-org/ccache-action@v1.2.16 + with: + key: ggml-ci-arm64-cpu-low-perf + evict-old-files: 1d + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential libcurl4-openssl-dev + + - name: Test + id: ggml-ci + run: | + LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + + ggml-ci-x64-cpu-high-perf: + runs-on: ubuntu-22.04 + + steps: + - name: Clone + id: checkout + uses: 
actions/checkout@v4 + + - name: ccache + uses: ggml-org/ccache-action@v1.2.16 + with: + key: ggml-ci-x64-cpu-high-perf + evict-old-files: 1d + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential libcurl4-openssl-dev + + - name: Test + id: ggml-ci + run: | + LLAMA_ARG_THREADS=$(nproc) bash ./ci/run.sh ./tmp/results ./tmp/mnt + + ggml-ci-arm64-cpu-high-perf: + runs-on: ubuntu-22.04-arm + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: ccache + uses: ggml-org/ccache-action@v1.2.16 + with: + key: ggml-ci-arm64-cpu-high-perf + evict-old-files: 1d + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential libcurl4-openssl-dev + + - name: Test + id: ggml-ci + run: | + LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_SVE=1 GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + + ggml-ci-arm64-cpu-high-perf-sve: + runs-on: ubuntu-22.04-arm + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: ccache + uses: ggml-org/ccache-action@v1.2.16 + with: + key: ggml-ci-arm64-cpu-high-perf-sve + evict-old-files: 1d + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential libcurl4-openssl-dev + + - name: Test + id: ggml-ci + run: | + LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + + ggml-ci-x64-nvidia-cuda: + runs-on: [self-hosted, Linux, X64, NVIDIA] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + nvidia-smi + GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/whisper.cpp /mnt/whisper.cpp + + ggml-ci-x64-nvidia-vulkan-cm: + runs-on: [self-hosted, Linux, X64, NVIDIA] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + vulkaninfo --summary + GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/whisper.cpp /mnt/whisper.cpp + + ggml-ci-x64-nvidia-vulkan-cm2: + runs-on: [self-hosted, Linux, X64, NVIDIA, COOPMAT2] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + vulkaninfo --summary + GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/whisper.cpp /mnt/whisper.cpp + + ggml-ci-x64-cpu-amx: + runs-on: [self-hosted, Linux, X64, CPU, AMX] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + bash ./ci/run.sh ~/results/whisper.cpp /mnt/whisper.cpp + + ggml-ci-mac-metal: + runs-on: [self-hosted, macOS, ARM64] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/whisper.cpp ~/mnt/whisper.cpp + + ggml-ci-mac-vulkan: + runs-on: [self-hosted, macOS, ARM64] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + vulkaninfo --summary + GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/whisper.cpp ~/mnt/whisper.cpp diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c5a80f269ef..0e2fb1f2b9e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -11,13 +11,16 @@ jobs: name: Push Docker image to Docker Hub if: github.event.pull_request.draft == false - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: COMMIT_SHA: ${{ github.sha }} strategy: + fail-fast: false matrix: config: - - { 
tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64,linux/arm64" } + - { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64" } + - { tag: "main-musa", dockerfile: ".devops/main-musa.Dockerfile", platform: "linux/amd64" } + - { tag: "main-intel", dockerfile: ".devops/main-intel.Dockerfile", platform: "linux/amd64" } - { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" } steps: @@ -26,6 +29,8 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v3 + with: + image: tonistiigi/binfmt:qemu-v7.0.0-28 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -37,21 +42,35 @@ jobs: username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push Docker image (versioned) - if: github.event_name == 'push' - uses: docker/build-push-action@v5 - with: - context: . - push: true - platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}" - file: ${{ matrix.config.dockerfile }} + - name: Free up disk space + run: | + sudo apt-get remove -y '^dotnet-.*' '^llvm-.*' '^mysql-.*' '^postgresql-.*' + sudo apt-get autoremove -y + sudo apt-get autoclean + + sudo rm -rf /usr/share/dotnet + sudo rm -rf /usr/local/lib/android + sudo rm -rf /opt/ghc + sudo rm -rf /opt/hostedtoolcache/CodeQL + + docker system prune -af + + df -h + + - name: Generate tags + id: tags + run: | + TAGS="ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}" + if [ "${{ github.event_name }}" == "push" ]; then + TAGS="$TAGS,ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}" + fi + echo "tags=$TAGS" >> $GITHUB_OUTPUT - name: Build and push Docker image (tagged) - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: context: . push: ${{ github.event_name == 'push' }} - platforms: ${{ matrix.config.platforms }} - tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}" + platforms: ${{ matrix.config.platform }} + tags: ${{ steps.tags.outputs.tags }} file: ${{ matrix.config.dockerfile }} diff --git a/.github/workflows/examples-wasm.yml b/.github/workflows/examples-wasm.yml new file mode 100644 index 00000000000..ebbbdfe20ca --- /dev/null +++ b/.github/workflows/examples-wasm.yml @@ -0,0 +1,97 @@ +name: Examples WASM +on: + push: + branches: ["master"] + + workflow_dispatch: + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + deploy-wasm-github-pages: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Pages + uses: actions/configure-pages@v4 + + - name: Setup emsdk + uses: mymindstorm/setup-emsdk@v14 + + - name: Build WASM Examples + # Enable for real build later in whisper.cpp + run: | + mkdir -p build-em && cd build-em + emcmake cmake .. 
-DCMAKE_BUILD_TYPE=Release + make -j + + - name: Create staging directory + run: mkdir -p staging + + - name: Create .nojekyll file in staging directory + run: touch staging/.nojekyll + + - name: Copy application files + run: | + build_dir=build-em/bin + + ls ${build_dir} + + # command.wasm + target_dir=staging/command.wasm + mkdir -p ${target_dir} + cp ${build_dir}/command.wasm/{index.html,command.js,helpers.js} ${target_dir} + cp ${build_dir}/libcommand.js ${target_dir} + + # bench.wasm + target_dir=staging/bench.wasm + mkdir -p ${target_dir} + cp ${build_dir}/bench.wasm/{index.html,bench.js,helpers.js} ${target_dir} + cp ${build_dir}/libbench.js ${target_dir} + + # stream.wasm + target_dir=staging/stream.wasm + mkdir -p ${target_dir} + cp ${build_dir}/stream.wasm/{index.html,stream.js,helpers.js} ${target_dir} + cp ${build_dir}/libstream.js ${target_dir} + + # wchess.wasm + target_dir=staging/wchess.wasm + mkdir -p ${target_dir} + cp -r ${build_dir}/wchess.wasm/{index.html,css,img,js} ${target_dir} + cp ${build_dir}/wchess.wasm.js ${target_dir} + + # whisper.wasm (this will be the main example page) + target_dir=staging + mkdir -p ${target_dir} + cp ${build_dir}/whisper.wasm/{index.html,main.js,helpers.js} ${target_dir} + cp ${build_dir}/libmain.js ${target_dir} + + # Copy Cross-Origin Isolation service worker + cp -v examples/coi-serviceworker.js staging/ + + - name: List files in staging directory (for debugging) + run: | + echo "Files in staging directory:" + find staging -type f | sort + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: ./staging + + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 808dd18c0b7..74ef8e0faae 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -10,8 +10,8 @@ on: - whisper.h jobs: - addon_node-ubuntu-latest: - runs-on: ubuntu-latest + addon_node-ubuntu-22: + runs-on: ubuntu-22.04 strategy: matrix: node-version: [ 16.x, 18.x ] @@ -22,7 +22,7 @@ jobs: - name: Dependencies run: | sudo apt-get update - sudo apt-get install build-essential + sudo apt-get install build-essential git sudo apt-get install cmake sudo apt-get install libsdl2-dev diff --git a/.gitignore b/.gitignore index e3319ad0329..957eeb75456 100644 --- a/.gitignore +++ b/.gitignore @@ -1,32 +1,30 @@ *.o *.a +*.d .cache/ .coreml/ .test/ +.venv/ .vs/ .vscode/ .DS_Store .vimspector.json /CMakeSettings.json +/talk-llama.dSYM/ build/ -build-blas/ -build-coreml/ -build-em/ -build-debug/ -build-release/ -build-rwdi/ -build-static/ -build-cublas/ -build-no-accel/ -build-sanitize-addr/ -build-sanitize-thread/ +build-*/ +build_*/ +tmp/ # SPM .build/ .swiftpm *.metallib +ggml-metal-embed.metal +ggml-metal-embed.metal.tmp + /main /stream /command @@ -53,6 +51,8 @@ extra/bench-gg.txt models/*.mlmodel models/*.mlmodelc models/*.mlpackage +models/*-encoder-openvino.xml +models/*-encoder-openvino-cache/ bindings/java/.gradle/ bindings/java/.idea/ .idea/ @@ -62,3 +62,5 @@ cmake-build-debug/ .cxx/ .gradle/ local.properties +.log +.exe diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 94d7ce1b3cc..00000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "bindings/ios"] - path = bindings/ios - url = https://github.com/ggerganov/whisper.spm diff --git a/AUTHORS b/AUTHORS index 33e6c9649c7..f523e0a7224 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,34 +1,51 @@ -# date: Tue Apr 9 20:27:03 EEST 2024 +# date: Tue 
Feb 4 13:03:35 EET 2025 # this file is auto-generated by scripts/gen-authors.sh 0/0 0cc4m 0xsourcecode <134374803+0xsourcecode@users.noreply.github.com> +65a <10104049+65a@users.noreply.github.com> +AIWintermuteAI <32562299+AIWintermuteAI@users.noreply.github.com> AT Aarni Koskela Aaron Pham <29749331+aarnphm@users.noreply.github.com> Aaron Taylor Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com> Abitofevrything <54505189+abitofevrything@users.noreply.github.com> +Adam Jones +Adrien Gallouët +Adrien Gallouët AfryMask Ahmad Bilal +Ahmad Tameem <113388789+Tameem-10xE@users.noreply.github.com> AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com> +AidanBeltonS +Akarshan Biswas +Akarshan Biswas Akash Mahajan Akash Mahajan Al Hoang <3811822-hoanga@users.noreply.gitlab.com> Alan +Albert Jin +Alberto Cabrera Pérez +Alberto Cabrera Pérez Aleksander Andrzejewski <18704749+aleksanderandrzejewski@users.noreply.github.com> Alex Azarov Alex Bacart <13940752+alex-bacart@users.noreply.github.com> Alex Evgrashin +Alex O'Connell <35843486+acon96@users.noreply.github.com> Alexandr Graschenkov Alexandru Mariuti Alexey Kharlamov Alfredo Montesinos Ali Alameh +Alter <0x7c48@gmail.com> Ananta Bastola +Andreas Kieslinger <47689530+aendk@users.noreply.github.com> +Andreas Lubbe Andreu Huguet Andrew Huynh +Andrew Minh Nguyen <40281306+amqdn@users.noreply.github.com> Andrew S Andy Maloney Anton Kostin @@ -40,8 +57,11 @@ AustinMroz Avik Sengupta Bader-eddine Ouaich <49657842+baderouaich@users.noreply.github.com> Baffin Lee +Ben Ashbaugh Ben Nortier Benjamin Heiniger +Bernhard M. Wiedemann +Binozo <70137898+Binozo@users.noreply.github.com> Bo-Yi Wu Boris Bliznioukov Borislav Stanimirov @@ -49,47 +69,86 @@ Brad Murray <59848399+bradmurray-dt@users.noreply.github.com> Brian Murray CRD716 Canis Lupus +Carlos Zoido Carolinabanana <140120812+Carolinabanana@users.noreply.github.com> +CarterLi999 <664681047@qq.com> ChangSeok Oh +Changyeon Kim Chaoqun <27287694+OpenWaygate@users.noreply.github.com> +Charles Xu <63788048+chaxu01@users.noreply.github.com> +Charles Xu +Chen Xi +Chen Xi +Chenguang Li <87689256+noemotiovon@users.noreply.github.com> Chia-Hsiang Cheng <88014292+garychia@users.noreply.github.com> Chidi Williams +Chris Elrod Christian <12550267+iceychris@users.noreply.github.com> +Christian Kastner Clifford Heath +Clint Herron Colin +Conrad Kramer +Corey Earwood +CrispStrobe <154636388+CrispStrobe@users.noreply.github.com> +DAN™ DGdev91 Damian Czaja +Dan Johansson <164997844+eddnjjn@users.noreply.github.com> +Dan Johansson Daniel Bevenius +Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com> +Daniel Ziegenberg +Daniele <57776841+daniandtheweb@users.noreply.github.com> +Dave +Dave Airlie +Dave Airlie +Daven Sanassy David David Thorpe +DavidKorczynski Davidson Francis Dener Stassun +Dibakar Gope Didzis Gosko +Diego Devesa Digipom Dimo +Djip007 <3705339+Djip007@users.noreply.github.com> +Djip007 Dody Suria Wijaya +Dou Xinpeng <15529241576@163.com> +Dou Xinpeng <81913537+Dou-Git@users.noreply.github.com> Dr. 
Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com> Duncan McConnell Egor Egorov Elkana Bardugo Emmanuel Schmidbauer Engininja2 <139037756+Engininja2@users.noreply.github.com> +Eric Curtin Eric Swanson Eric Tendian +Eric Zhang <34133756+EZForever@users.noreply.github.com> Erik Scholz Evan Jones Evan Martin Eve <139727413+netrunnereve@users.noreply.github.com> Evgeny Kuznetsov F1L1P <78918286+F1L1Pv2@users.noreply.github.com> +Faisal Zaghloul Fangjun Kuang Felix Finn Voorhees +FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com> FlippFuzz <41221030+FlippFuzz@users.noreply.github.com> +Frankie Robertson Gang Chen Gavin Cai George Hindle Georgi Gerganov +Gilad S <7817232+giladgd@users.noreply.github.com> +Gilad S +Gilad S. <7817232+giladgd@users.noreply.github.com> GitAritron <103900385+GitAritron@users.noreply.github.com> GiviMAD Gleicon Moraes @@ -98,41 +157,66 @@ Guillaume Wenzek HY. Kelvin Lee <34256578+hykelvinlee42@users.noreply.github.com> Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com> Hang +Haus1 Herman Semenov +HimariO +Hong Bo PENG Hrishikesh Barman +Hugo Ian Bicking Ian Bull +Ihar Hrachyshka Ikko Ashimine +Ikko Eltociear Ashimine InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com> Ismatulla Mansurov <47342870+sapoepsilon@users.noreply.github.com> +Ivan +Ivan Filipov <159561759+vanaka11@users.noreply.github.com> Ivan Gorin +Ivo von Putzer Reibegg JJ <103335846+computerscienceiscool@users.noreply.github.com> Jack Mousseau JacobLinCool Jakub Ráček Jared Van Bortel Jay Binks +Jayant +Jeff Bolz +Jeroen Mostert Jhen-Jie Hong Jhen-Jie Hong JidongZhang-THU <1119708529@qq.com> Jo Liss +Joe Todd Johan Johannes Gäßler John Balis +JohnnyB Jonathan Soo Jonno <1160532+razodactyl@users.noreply.github.com> Joonas Pihlajamaa Jose <34888496+Jerry-Master@users.noreply.github.com> Josh Bleecher Snyder +Josscii Judd Jumper775 <78500318+jumpers775@users.noreply.github.com> +Jun Hee Yoo +Junil Kim +Justina Cho Justine Tunney +Justine Tunney +KITAITI Makoto KP Kaiser Kamilake +Karol Kontny <82021046+kkontny@users.noreply.github.com> +Karthick Kartik Saranathan <278928+Kartiku@users.noreply.github.com> Kasumi <90275229+kasumi-1@users.noreply.github.com> Kawrakow <48489457+ikawrakow@users.noreply.github.com> +Kendrick Taylor Kevin Brothaler +Kevin Gibbons +Konosuke Sakai Konstantin Zhuravlyov Kreijstal Kylin <56434533+KyL0N@users.noreply.github.com> @@ -147,56 +231,110 @@ Luis Herrera Lukas Rist M. A. Ali <73258591+MightyStud@users.noreply.github.com> M. 
Eren Akbiyik +Ma Mingfei Maciek +Mahesh Madhav <67384846+heshpdx@users.noreply.github.com> Marcin Mielniczuk +Mark Karpelès +Mark Zhuang +Markus Tavenrath +Martin Delille Martin Warnaar +Masaya, Kato <62578291+msy-kato@users.noreply.github.com> Matheus de Sousa <23645013+keyehzy@users.noreply.github.com> +Mathieu Baudier Mathijs de Bruin Matija Pevec +Matt Stephenson +Max Krasnyansky +Max Krasnyansky Maximiliano Levi <8160966+maxilevi@users.noreply.github.com> Meng, Hengyu +Mengqing Cao Michael Podvitskiy Michael Rienstra Mikhail Grigorev Mohammadreza Hendiani Mohit Agarwal +Molly Sophia Murilo Santana +NETZkultur GmbH +Natsu Neil Chudleigh +Neo Zhang <14088817+arthw@users.noreply.github.com> Neo Zhang Jianyu Neuman Vong +Nicholai Tukanov Nicholas Albion +Nico Bosshard +Nicolò Scipione Niels Mayer +Nikita Sarychev <42014488+sARY77@users.noreply.github.com> +Nikolaj Olsson Okabintaro <103938900+Okabintaro@users.noreply.github.com> Oleg Sidorov Oleg Sidorov +Olivier Chafik Ondrej Kokes Ouadie EL FAROUKI +PAB Paul Tsochantaris +Pedro Probst +Peng +Peter Philipp Zabel Philippe Normand +Philippe Normand +Plamen Minev +Prashant Vithule <119530321+Vithulep@users.noreply.github.com> Przemysław Pawełczyk Qianhe Chen <54462604+chenqianhe@users.noreply.github.com> +R0CKSTAR +R0CKSTAR +Radoslav Gerganov Radosław Gryta +Rahul Vadhyar <107788610+RahulVadhyar@users.noreply.github.com> +Raiya Araki <83504221+rai62@users.noreply.github.com> Reinforce-II Reinis Muiznieks RelatedTitle +Rémy Oudompheng RhinoDevel Rich Jones +Robert Ormandi <52251610+ormandi@users.noreply.github.com> Robin Roddur Dasgupta Roland Rabien +Romain Biessy +Ronsor Rotem Dan Ryan Hitchman Ryan Metcalfe <107415876+RyanMetcalfeInt8@users.noreply.github.com> RyanChang +SRHMorris <69468379+SRHMorris@users.noreply.github.com> +SXX +Sacha Arbonel +Salman Faroz +Salvatore Mesoraca Sam <49637763+Onlyartist9@users.noreply.github.com> Sam Pullara +Samuel Durante <44513615+samueldurantes@users.noreply.github.com> Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> +Sandro Hanea <40202887+sandrohanea@users.noreply.github.com> +Sergio López Sergio López +Shanshan Shen <467638484@qq.com> +Shijie <821898965@qq.com> +Shupei Fan Siddharth Ramakrishnan +Sigbjørn Skjæret Simon Moisselin Sindre Sorhus Slava Primenko +Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com> +Stavros Panakakis <53979866+Stavrospanakakis@users.noreply.github.com> +Stefan Sydow +Stefan Sydow Syahmi Azhar Syed Jafri Sơn Phan Trung @@ -205,37 +343,63 @@ Takeshi Inoue Tamotsu Takahashi Taras Glek Tauseef Mohiuddin <35351464+tauseefmohammed2@users.noreply.github.com> +Thamster Thijs Raymakers Thomas Fitzsimmons Tiago Fassoni Tienshiao Ma +Tim Miller Timothy Cronin <40186632+4imothy@users.noreply.github.com> Tobrun Todd +Toliver Tong Li <31761981+litongjava@users.noreply.github.com> +Tony Wasserka <4840017+neobrain@users.noreply.github.com> Topping1 <78745143+Topping1@users.noreply.github.com> Travis Cline UEXTM.com <84163508+uextm@users.noreply.github.com> +UsernamesLame <156965854+UsernamesLame@users.noreply.github.com> Vadim Peretokin Valentin Gosu <1454649+valenting@users.noreply.github.com> +Vin Misra Vulcan <93451215+trholding@users.noreply.github.com> WhiteOlivierus <36532695+WhiteOlivierus@users.noreply.github.com> +William Tambellini +William Tambellini +Wilson Silva Xiang (Kevin) Li Xiao-Yong Jin XiaotaoChen +Xingchen Song(宋星辰) +Xinpeng Dou <81913537+Dou-Git@users.noreply.github.com> +Xuan Son Nguyen Yajing Tang Yang Shen Yunès +Yuri Khrustalev +Yusuf 
Redžić <48274562+redzic@users.noreply.github.com> ZaBlazzingZephyrus <119159668+blazingzephyr@users.noreply.github.com> +Zhenwei Jin <109658203+kylo5aby@users.noreply.github.com> +Zhiyuan Li +Zhiyuan Li Zigfrid Zvezdin Zollner <24618122+Zolliner@users.noreply.github.com> +a3sh <38979186+A3shTnT@users.noreply.github.com> +ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com> +agray3 ai-at-home <149282006+ai-at-home@users.noreply.github.com> +aldorof alonfaraj +amd-dwang +amritahs-ibm andypayne ardfork <134447697+ardfork@users.noreply.github.com> +arizhih <40765267+arizhih@users.noreply.github.com> automaticcat +bandoti <141645996+bandoti@users.noreply.github.com> be-next bert hubert +billyct bmwl bobqianic <129547291+bobqianic@users.noreply.github.com> bocytko @@ -248,7 +412,9 @@ byte-6174 <88070277+byte-6174@users.noreply.github.com> cdosoftei clach04 compilade <113953597+compilade@users.noreply.github.com> +compilade conradg +crummyh ddpasa <112642920+ddpasa@users.noreply.github.com> denersc dscripka @@ -256,28 +422,55 @@ duthils ecneladis faker fitzsim +fj-y-saito <85871716+fj-y-saito@users.noreply.github.com> fraxy-v <65565042+fraxy-v@users.noreply.github.com> genevera (she/her) geniusnut +gilbertgong +gn64 +goldwaving <77494627+goldwaving@users.noreply.github.com> greeshmay +haopeng <657407891@qq.com> +hipudding +hsinhoyeh hydai iamthad +issixx <46835150+issixx@users.noreply.github.com> james wolf +jdomke <28772296+jdomke@users.noreply.github.com> +jettoblack +jiez <373447296@qq.com> joecryptotoo <80373433+joecryptotoo@users.noreply.github.com> jorismertz <35079666+jorismertz@users.noreply.github.com> +junchao-loongson <68935141+junchao-loongson@users.noreply.github.com> junkfood <69683722+JunkFood02@users.noreply.github.com> jwijffels +k.h.lai kamranjon katsu560 kennethge <57784063+kenneth-ge@users.noreply.github.com> keyehzy +kunnis +l3utterfly leejet +leo-pony +lhez litong <31761981+litongjava@users.noreply.github.com> +liuwei-git <14815172+liuwei-git@users.noreply.github.com> lnyan +luoyu-intel m.bell +mahorozte <41834471+mahorozte@users.noreply.github.com> +mashizora <30516315+mashizora@users.noreply.github.com> +matt23654 +matteo +mgrachten mkiol +mky_coder <47767389+mkycoder@users.noreply.github.com> novag <7754358+novag@users.noreply.github.com> pajowu +pengxin99 +petterreinholdtsen polarmoon <90010972+polarmoon@users.noreply.github.com> rlapray sandrohanea <40202887+sandrohanea@users.noreply.github.com> @@ -287,15 +480,31 @@ shikokuchuo <53399081+shikokuchuo@users.noreply.github.com> slaren slashlib snadampal <87143774+snadampal@users.noreply.github.com> +someone13574 <81528246+someone13574@users.noreply.github.com> st-gr <38470677+st-gr@users.noreply.github.com> +stduhpf +stormofice <58337328+stormofice@users.noreply.github.com> texmex76 <40733439+texmex76@users.noreply.github.com> thefinaldegree +thewh1teagle <61390950+thewh1teagle@users.noreply.github.com> +toboil-features <160222185+toboil-features@users.noreply.github.com> trixirt ulatekh undef +uvos +uvos +valVk venkr vicalloy +wangshuai09 <391746016@qq.com> +woachk <24752637+woachk@users.noreply.github.com> +xctan xdrudis +yuri@FreeBSD +zhangjixiong +zhentaoyu zhouwg <6889919+zhouwg@users.noreply.github.com> +zhouwg +谢乃闻 布客飞龙 <562826179@qq.com> Артём Земляк diff --git a/CMakeLists.txt b/CMakeLists.txt index 4055c2bbd87..517f30bb6da 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,25 +1,31 @@ -cmake_minimum_required (VERSION 3.5) +cmake_minimum_required(VERSION 3.5) # for add_link_options and implicit 
target directories. +project("whisper.cpp" C CXX) +project("whisper.cpp" VERSION 1.8.2) +include(CheckIncludeFileCXX) -# Allow for the creation of solution folders. -set_property(GLOBAL PROPERTY USE_FOLDERS ON) - -project(whisper.cpp VERSION 1.6.2) set(SOVERSION 1) +#set(CMAKE_WARN_DEPRECATED YES) +set(CMAKE_WARN_UNUSED_CLI YES) + +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") +endif() + # Add path to modules list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) -if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) +if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) set(WHISPER_STANDALONE ON) - include(GitVars) - include(BuildTypes) + + include(git-vars) # configure project version - if (EXISTS "${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl") - configure_file(${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl ${CMAKE_SOURCE_DIR}/bindings/ios/Makefile @ONLY) - endif() configure_file(${CMAKE_SOURCE_DIR}/bindings/javascript/package-tmpl.json ${CMAKE_SOURCE_DIR}/bindings/javascript/package.json @ONLY) else() set(WHISPER_STANDALONE OFF) @@ -29,6 +35,16 @@ if (EMSCRIPTEN) set(BUILD_SHARED_LIBS_DEFAULT OFF) option(WHISPER_WASM_SINGLE_FILE "whisper: embed WASM inside the generated whisper.js" ON) + + # TODO: without these, we get the following error: + # wasm-ld: error: --shared-memory is disallowed by whisper.cpp.o because it was not compiled with 'atomics' or 'bulk-memory' features. + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") + + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -s TOTAL_STACK=5242880") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s TOTAL_STACK=5242880") + + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated") else() if (MINGW) set(BUILD_SHARED_LIBS_DEFAULT OFF) @@ -37,795 +53,203 @@ else() endif() endif() -# options +option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT}) -if (APPLE) - set(WHISPER_METAL_DEFAULT ON) -else() - set(WHISPER_METAL_DEFAULT OFF) -endif() - -option(BUILD_SHARED_LIBS "whisper: build shared libs" ${BUILD_SHARED_LIBS_DEFAULT}) +# +# option list +# +# debug option(WHISPER_ALL_WARNINGS "whisper: enable all compiler warnings" ON) option(WHISPER_ALL_WARNINGS_3RD_PARTY "whisper: enable all compiler warnings in 3rd party libs" OFF) -option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF) -option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF) -option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF) - -option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE}) -option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE}) - -option(WHISPER_SDL2 "whisper: support for libSDL2" OFF) - -if (CMAKE_SYSTEM_NAME MATCHES "Linux") - option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF) -endif() - -option(WHISPER_NO_AVX "whisper: disable AVX" OFF) -option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF) -option(WHISPER_NO_AVX512 "whisper: disable AVX512" ON) -option(WHISPER_NO_AVX512_VBMI "whisper: disable AVX512-VBMI" ON) -option(WHISPER_NO_AVX512_VNNI "whisper: disable AVX512-VNNI" ON) -option(WHISPER_NO_FMA "whisper: disable FMA" OFF) 
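Note: the per-ISA WHISPER_NO_* toggles being dropped in this hunk have no one-to-one replacement; CPU-feature and backend selection moves into ggml, and the old option names are remapped by the whisper_option_depr() helper added further down in this file. A minimal migration sketch for a configure invocation, assuming only the GGML_CUDA name from that helper's call list:

    # before (deprecated; per the mapping below, WHISPER_CUBLAS now raises a fatal error):
    #   cmake -B build -DWHISPER_CUBLAS=1
    cmake -B build -DGGML_CUDA=1
    cmake --build build -j --config Release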
-option(WHISPER_NO_F16C "whisper: disable F16c" OFF) - -option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF) - -if (APPLE) - option(WHISPER_NO_ACCELERATE "whisper: disable Accelerate framework" OFF) - option(WHISPER_METAL "whisper: use Metal" ${WHISPER_METAL_DEFAULT}) - option(WHISPER_METAL_NDEBUG "whisper: disable Metal debugging" OFF) - option(WHISPER_COREML "whisper: enable Core ML framework" OFF) - option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF) - option(WHISPER_METAL_EMBED_LIBRARY "whisper: embed Metal library" OFF) - option(WHISPER_BLAS "whisper: use BLAS" ON) - set (WHISPER_BLAS_VENDOR "Apple" CACHE STRING - "whisper: BLAS library vendor") -else() - option(WHISPER_CUDA "whisper: support for CUDA" OFF) - option(WHISPER_CUDA_FA_ALL_QUANTS "whisper: compile all quants for FlashAttention" OFF) - option(WHISPER_CUBLAS "whisper: support for CUDA (deprecated)" OFF) - option(WHISPER_HIPBLAS "whisper: support for hipBLAS" OFF) - option(WHISPER_CLBLAST "whisper: use CLBlast" OFF) - option(WHISPER_MKL "whisper: use Intel Math Kernel Library (MKL)" OFF) - option(WHISPER_SYCL "whisper: use SYCL" OFF) - option(WHISPER_SYCL_F16 "whisper: use 16 bit floats for sycl calculations" OFF) - option(WHISPER_BLAS "whisper: use BLAS" OFF) - set (WHISPER_BLAS_VENDOR "Generic" CACHE STRING - "whisper: BLAS library vendor") -endif() - -option(WHISPER_PERF "whisper: enable perf timings" OFF) +# build +option(WHISPER_FATAL_WARNINGS "whisper: enable -Werror flag" OFF) +option(WHISPER_USE_SYSTEM_GGML "whisper: use system-installed GGML library" OFF) # sanitizers +option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF) +option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF) +option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF) -if (NOT MSVC) - if (WHISPER_SANITIZE_THREAD) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread") - endif() - - if (WHISPER_SANITIZE_ADDRESS) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fno-omit-frame-pointer") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer") - endif() - - if (WHISPER_SANITIZE_UNDEFINED) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined") - endif() -endif() - -#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ffast-math") -#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native") - -# dependencies - -find_package(Threads REQUIRED) +# extra artifacts +option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE}) +option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE}) +option(WHISPER_BUILD_SERVER "whisper: build server example" ${WHISPER_STANDALONE}) -#compile flag sycl -if (WHISPER_SYCL) - set(CMAKE_CXX_STANDARD 17) -else() - set(CMAKE_CXX_STANDARD 11) -endif() - -if (WHISPER_FFMPEG) - # As of cmake 3.27, there is no official cmake support for FindFFmpeg. - # Consequnelty we added a FindFFmpeg.cmake script the cmake subfolder: - # whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE - # libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations - # libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters. 
- # libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams. - find_package(FFmpeg REQUIRED) - if (NOT ${FFMPEG_FOUND}) - message(FATAL_ERROR "Cannot find ffmpeg libs/headers") - endif() - message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}") - message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}") - message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}") - message(STATUS "Found avformat ${AVFORMAT_VERSION}") - include_directories(${FFMPEG_INCLUDE_DIRS}) - add_compile_definitions(WHISPER_FFMPEG) - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${FFMPEG_LIBRARIES}) -endif() - -# on APPLE -if (APPLE) - # include Accelerate framework - if (NOT WHISPER_NO_ACCELERATE) - find_library(ACCELERATE_FRAMEWORK Accelerate) +# 3rd party libs +option(WHISPER_CURL "whisper: use libcurl to download model from an URL" OFF) +option(WHISPER_SDL2 "whisper: support for libSDL2" OFF) - if (ACCELERATE_FRAMEWORK) - message(STATUS "Accelerate framework found") - - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK}) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64) - else() - message(FATAL_ERROR "Accelerate framework not found") - endif() - endif() - - if (WHISPER_METAL) - find_library(FOUNDATION_LIBRARY Foundation REQUIRED) - find_library(METAL_FRAMEWORK Metal REQUIRED) - find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - - if (METAL_FRAMEWORK) - message(STATUS "Metal framework found") - - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} - ${FOUNDATION_LIBRARY} - ${METAL_FRAMEWORK} - ${METALKIT_FRAMEWORK} - ) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_METAL) - - if (WHISPER_METAL_NDEBUG) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_NDEBUG) - endif() - else() - message(FATAL_ERROR "Metal framework not found") - endif() - - set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h) - - # copy ggml-common.h and ggml-metal.metal to bin directory - configure_file(ggml-common.h bin/ggml-common.h COPYONLY) - configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) - - if (WHISPER_METAL_EMBED_LIBRARY) - enable_language(ASM) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_EMBED_LIBRARY) - - set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") - set(COMMON_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/ggml-common.h") - - file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated") - set(EMBED_METALLIB_ASSEMBLY "${CMAKE_BINARY_DIR}/autogenerated/ggml-embed-metallib.s") - set(EMBED_METALLIB_SOURCE "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-combined.metal") - - add_custom_command( - OUTPUT ${EMBED_METALLIB_SOURCE} - COMMAND sed -e "/^#include \\\"ggml-common.h\\\"/r ${COMMON_HEADER}" -e "/^#include \\\"ggml-common.h\\\"/d" ${METALLIB_SOURCE} > ${EMBED_METALLIB_SOURCE} - DEPENDS ${METALLIB_SOURCE} ${COMMON_HEADER} - COMMENT "Generating combined Metal library for embedding" - ) - - add_custom_command( - OUTPUT ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo ".section __DATA,__ggml_metallib" > ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo ".globl _ggml_metallib_start" >> ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo "_ggml_metallib_start:" >> ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo ".incbin \\\"${EMBED_METALLIB_SOURCE}\\\"" >> ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo ".globl _ggml_metallib_end" >> ${EMBED_METALLIB_ASSEMBLY} - COMMAND echo "_ggml_metallib_end:" >> ${EMBED_METALLIB_ASSEMBLY} - DEPENDS 
${EMBED_METALLIB_SOURCE} - COMMENT "Generate assembly for embedded Metal library" - ) - - set(GGML_SOURCES_METAL ${GGML_SOURCES_METAL} ${EMBED_METALLIB_ASSEMBLY}) - endif() - endif() - - if (WHISPER_COREML) - find_library(FOUNDATION_FRAMEWORK Foundation) - find_library(COREML_FRAMEWORK CoreML) - - if (COREML_FRAMEWORK) - message(STATUS "CoreML framework found") - - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_COREML) - else() - message(FATAL_ERROR "CoreML framework not found") - endif() - - if (WHISPER_COREML_ALLOW_FALLBACK) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_COREML_ALLOW_FALLBACK) - endif() - endif() -endif() - -if (WHISPER_BLAS) - if (WHISPER_STATIC) - set(BLA_STATIC ON) - endif() - #if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) - # set(BLA_SIZEOF_INTEGER 8) - #endif() - - set(BLA_VENDOR ${WHISPER_BLAS_VENDOR}) - find_package(BLAS) - - if (BLAS_FOUND) - message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") - - if (("${BLAS_INCLUDE_DIRS}" STREQUAL "") AND NOT (${WHISPER_BLAS_VENDOR} MATCHES "Apple")) - # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake. - # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268 - find_package(PkgConfig REQUIRED) - if (${WHISPER_BLAS_VENDOR} MATCHES "Generic") - pkg_check_modules(DepBLAS REQUIRED blas) - elseif (${WHISPER_BLAS_VENDOR} MATCHES "OpenBLAS") - # As of openblas v0.3.22, the 64-bit is named openblas64.pc - pkg_check_modules(DepBLAS openblas64) - if (NOT DepBLAS_FOUND) - pkg_check_modules(DepBLAS REQUIRED openblas) - endif() - elseif (${WHISPER_BLAS_VENDOR} MATCHES "FLAME") - pkg_check_modules(DepBLAS REQUIRED blis) - elseif (${WHISPER_BLAS_VENDOR} MATCHES "ATLAS") - pkg_check_modules(DepBLAS REQUIRED blas-atlas) - elseif (${WHISPER_BLAS_VENDOR} MATCHES "FlexiBLAS") - pkg_check_modules(DepBLAS REQUIRED flexiblas_api) - elseif (${WHISPER_BLAS_VENDOR} MATCHES "Intel") - # all Intel* libraries share the same include path - pkg_check_modules(DepBLAS REQUIRED mkl-sdl) - elseif (${WHISPER_BLAS_VENDOR} MATCHES "NVHPC") - # this doesn't provide pkg-config - # suggest to assign BLAS_INCLUDE_DIRS on your own - if ("${NVHPC_VERSION}" STREQUAL "") - message(WARNING "Better to set NVHPC_VERSION") - else() - set(DepBLAS_FOUND ON) - set(DepBLAS_INCLUDE_DIRS "/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include") - endif() - endif() - if (DepBLAS_FOUND) - set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS}) - else() - message(WARNING "BLAS_INCLUDE_DIRS neither been provided nor been automatically" - " detected by pkgconfig, trying to find cblas.h from possible paths...") - find_path(BLAS_INCLUDE_DIRS - NAMES cblas.h - HINTS - /usr/include - /usr/local/include - /usr/include/openblas - /opt/homebrew/opt/openblas/include - /usr/local/opt/openblas/include - /usr/include/x86_64-linux-gnu/openblas/include - ) - endif() - endif() - - message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") - - add_compile_options(${BLAS_LINKER_FLAGS}) - - add_compile_definitions(GGML_USE_BLAS) - - if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${WHISPER_BLAS_VENDOR} MATCHES "Generic" OR ${WHISPER_BLAS_VENDOR} MATCHES "Intel")) - add_compile_definitions(GGML_BLAS_USE_MKL) - endif() - - set(GGML_HEADERS_BLAS ggml-blas.h) - set(GGML_SOURCES_BLAS ggml-blas.cpp) - - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES}) - set(WHISPER_EXTRA_INCLUDES ${WHISPER_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS}) - else() - message(WARNING "BLAS not found, please refer to " - 
"https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" - " to set correct WHISPER_BLAS_VENDOR") - endif() -endif() - -if (WHISPER_MKL) - find_package(MKL CONFIG REQUIRED PATHS $ENV{MKLROOT}) - message(STATUS "Imported oneMKL targets: ${MKL_IMPORTED_TARGETS}") - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_BLAS_USE_MKL) -endif() - -if (WHISPER_CUBLAS) - message(WARNING "WHISPER_CUBLAS is deprecated and will be removed in the future.\nUse WHISPER_CUDA instead") - set(WHISPER_CUDA ON) -endif() - -if (WHISPER_CUDA) - cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES - - find_package(CUDAToolkit) - - if (CUDAToolkit_FOUND) - message(STATUS "cuBLAS found") - - if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES) - # 52 == lowest CUDA 12 standard - # 60 == f16 CUDA intrinsics - # 61 == integer CUDA intrinsics - # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster - if (WHISPER_CUDA_F16 OR WHISPER_CUDA_DMMV_F16) - set(CMAKE_CUDA_ARCHITECTURES "60;61;70") # needed for f16 CUDA intrinsics - else() - set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics - #set(CMAKE_CUDA_ARCHITECTURES "OFF") # use this to compile much faster, but only F16 models work - endif() - endif() - message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}") - - enable_language(CUDA) - - file(GLOB GGML_SOURCES_CUDA "ggml-cuda/*.cu") - list(APPEND GGML_SOURCES_CUDA ggml-cuda.h) - list(APPEND GGML_SOURCES_CUDA ggml-cuda.cu) - - file(GLOB SRCS "ggml-cuda/template-instances/fattn-wmma*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/mmq*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - - if (WHISPER_CUDA_FA_ALL_QUANTS) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) - else() - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*f16-f16.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - endif() - - add_compile_definitions(GGML_USE_CUDA) - add_compile_definitions(GGML_CUDA_USE_GRAPHS) - - if (WHISPER_STATIC) - if (WIN32) - # As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt CUDA::cufft) - else () - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static CUDA::cufft_static) - endif() - else() - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt CUDA::cufft) - endif() - - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cuda_driver) - else() - message(FATAL_ERROR "cuBLAS not found") - endif() +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF) endif() +option(WHISPER_COREML "whisper: enable Core ML framework" OFF) +option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF) +option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF) -if (WHISPER_HIPBLAS) - list(APPEND CMAKE_PREFIX_PATH /opt/rocm) - if (NOT ${CMAKE_C_COMPILER_ID} MATCHES 
"Clang") - message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang") - endif() - if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") - message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") - endif() - - find_package(hip) - find_package(hipblas) - find_package(rocblas) - - if (${hipblas_FOUND} AND ${hip_FOUND}) - message(STATUS "HIP and hipBLAS found") - set(GGML_HEADERS_ROCM "ggml-cuda.h") - - file(GLOB GGML_SOURCES_ROCM "ggml-cuda/*.cu") - list(APPEND GGML_SOURCES_ROCM "ggml-cuda.cu") - - file(GLOB SRCS "ggml-cuda/template-instances/fattn-wmma*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/mmq*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - - if (WHISPER_CUDA_FA_ALL_QUANTS) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) - else() - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*f16-f16.cu") - list(APPEND GGML_SOURCES_CUDA ${SRCS}) - endif() - - add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUDA) - - set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) - if (WHISPER_STATIC) - message(FATAL_ERROR "Static linking not supported for HIP/ROCm") - endif() - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas) - else() - message(FATAL_ERROR "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm") - endif() -endif() - -if( WHISPER_OPENVINO ) - find_package(OpenVINO REQUIRED COMPONENTS Runtime) -endif() +# Required for relocatable CMake package +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) -if (WHISPER_SYCL) - if ( NOT DEFINED ENV{ONEAPI_ROOT}) - message(FATAL_ERROR "Not detect ENV {ONEAPI_ROOT}, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh") - endif() - #todo: AOT +# override ggml options +set(GGML_SANITIZE_THREAD ${WHISPER_SANITIZE_THREAD}) +set(GGML_SANITIZE_ADDRESS ${WHISPER_SANITIZE_ADDRESS}) +set(GGML_SANITIZE_UNDEFINED ${WHISPER_SANITIZE_UNDEFINED}) +set(GGML_ALL_WARNINGS ${WHISPER_ALL_WARNINGS}) +set(GGML_FATAL_WARNINGS ${WHISPER_FATAL_WARNINGS}) - find_package(IntelSYCL REQUIRED) - if (WHISPER_SYCL_F16) - add_compile_definitions(GGML_SYCL_F16) +# transition helpers +function (whisper_option_depr TYPE OLD NEW) + if (${OLD}) + message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n") + set(${NEW} ON) endif() - add_compile_definitions(GGML_USE_SYCL) +endfunction() - add_compile_options(-I./) #include DPCT - add_compile_options(-I/${SYCL_INCLUDE_DIR}) +whisper_option_depr(FATAL_ERROR WHISPER_CUBLAS GGML_CUDA) +whisper_option_depr(WARNING WHISPER_CUDA GGML_CUDA) +whisper_option_depr(WARNING WHISPER_KOMPUTE GGML_KOMPUTE) +whisper_option_depr(WARNING WHISPER_METAL GGML_METAL) +whisper_option_depr(WARNING WHISPER_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY) +whisper_option_depr(WARNING WHISPER_NATIVE GGML_NATIVE) +whisper_option_depr(WARNING WHISPER_OPENMP GGML_OPENMP) +whisper_option_depr(WARNING WHISPER_RPC GGML_RPC) +whisper_option_depr(WARNING WHISPER_SYCL GGML_SYCL) +whisper_option_depr(WARNING WHISPER_SYCL_F16 GGML_SYCL_F16) +whisper_option_depr(WARNING WHISPER_CCACHE GGML_CCACHE) - 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib") - - set(GGML_HEADERS_SYCL ggml-sycl.h) - file(GLOB GGML_SOURCES_SYCL "ggml-sycl/*.cpp") - list(APPEND GGML_SOURCES_SYCL "ggml-sycl.cpp") - - set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} sycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread) +if (GGML_CUDA AND NOT MSVC) + #GGML_CUDA enabled, add the necessary compile options -Wno-deprecated-gpu-targets + add_compile_options(-Wno-deprecated-gpu-targets) endif() -# compiler flags - -if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) - set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "RelWithDebInfo") -endif () -if (WHISPER_ALL_WARNINGS) - if (NOT MSVC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} \ - -Wall \ - -Wextra \ - -Wpedantic \ - -Wshadow \ - -Wcast-qual \ - -Wstrict-prototypes \ - -Wpointer-arith \ - -Wno-unused-function \ - ") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} \ - -Wall \ - -Wextra \ - -Wpedantic \ - -Wcast-qual \ - ") - else() - # todo : msvc - endif() -endif() - -if (NOT MSVC) - # TODO: temporary disabled until we figure out ggml-metal.m - #set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=vla") - #set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-math-errno -ffinite-math-only -funsafe-math-optimizations") -endif() - -message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") +# +# build the library +# -if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") - message(STATUS "ARM detected") -elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") - message(STATUS "PowerPC detected") -else() - message(STATUS "x86 detected") - if (MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /utf-8") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /utf-8") - if(NOT WHISPER_NO_AVX512) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX512") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX512") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX512") - # MSVC has no compile-time flags enabling specific - # AVX512 extensions, neither it defines the - # macros corresponding to the extensions. - # Do it manually. 
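Note: the comment above refers to the manual __AVX512VBMI__/__AVX512VNNI__ definitions removed in the hunk that follows; MSVC's /arch:AVX512 neither enables nor advertises those sub-extensions, so the macros had to be defined by hand. After this change the equivalent knobs are expected to come from ggml's own CMake options; a hedged sketch, assuming ggml's GGML_AVX512* option names:

    # old-style: cmake .. -DWHISPER_NO_AVX512=OFF -DWHISPER_NO_AVX512_VBMI=OFF
    cmake -B build -DGGML_AVX512=ON -DGGML_AVX512_VBMI=ON -DGGML_AVX512_VNNI=ON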
- if (NOT WHISPER_NO_AVX512_VBMI) - add_compile_definitions($<$:__AVX512VBMI__>) - add_compile_definitions($<$:__AVX512VBMI__>) - endif() - if (NOT WHISPER_NO_AVX512_VNNI) - add_compile_definitions($<$:__AVX512VNNI__>) - add_compile_definitions($<$:__AVX512VNNI__>) - endif() - elseif(NOT WHISPER_NO_AVX2) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX2") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX2") - elseif(NOT WHISPER_NO_AVX) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX") +if (NOT TARGET ggml) + if (WHISPER_USE_SYSTEM_GGML) + find_package(ggml REQUIRED) + if (NOT ggml_FOUND) + message(FATAL_ERROR "System-installed GGML library not found.") endif() + add_library(ggml ALIAS ggml::ggml) else() - if (EMSCRIPTEN) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread -s TOTAL_STACK=5242880") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -s TOTAL_STACK=5242880") - else() - if(NOT WHISPER_NO_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx") - endif() - if(NOT WHISPER_NO_AVX2) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2") - endif() - if(NOT WHISPER_NO_AVX512) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw") - if(NOT WHISPER_NO_AVX512_VBMI) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vbmi") - endif() - if(NOT WHISPER_NO_AVX512_VNNI) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vnni") - endif() - endif() - if(NOT WHISPER_NO_FMA) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma") - endif() - if(NOT WHISPER_NO_F16C) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mf16c") - endif() + add_subdirectory(ggml) + if(WIN32) + # The following adds a _DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR macro and is a workaround for + # the Windows C++ standard library which does not support constexpr mutexes. + # From the release notes: https://github.com/microsoft/STL/wiki/Changelog + # Disable constexpr mutex constructor on Windows + # Fixed mutex's constructor to be constexpr. #3824 #4000 #4339 + # Note: Programs that aren't following the documented restrictions on binary compatibility may encounter + # null dereferences in mutex machinery. You must follow this rule: + # When you mix binaries built by different supported versions of the toolset, the Redistributable version + # must be at least as new as the latest toolset used by any app component. + # You can define _DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR as an escape hatch. + # + # Specifically to whisper.cpp this would cause a crash when using the Java bindings, + # resulting in an Invalid memory access error. + target_compile_definitions(ggml-base PRIVATE _DISABLE_CONSTEXPR_MUTEX_CONSTRUCTOR) endif() endif() + # ...
otherwise assume ggml is added by a parent CMakeLists.txt endif() +add_subdirectory(src) # -# POSIX conformance +# install # -# clock_gettime came in POSIX.1b (1993) -# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional -# posix_memalign came in POSIX.1-2001 / SUSv3 -# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985) -add_compile_definitions(_XOPEN_SOURCE=600) - -# Somehow in OpenBSD whenever POSIX conformance is specified -# some string functions rely on locale_t availability, -# which was introduced in POSIX.1-2008, forcing us to go higher -if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") - remove_definitions(-D_XOPEN_SOURCE=600) - add_compile_definitions(_XOPEN_SOURCE=700) -endif() - -# Data types, macros and functions related to controlling CPU affinity -# are available on Linux through GNU extensions in libc -if (CMAKE_SYSTEM_NAME MATCHES "Linux") - add_compile_definitions(_GNU_SOURCE) -endif() - -# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, -# and on macOS its availability depends on enabling Darwin extensions -# similarly on DragonFly, enabling BSD extensions is necessary -if (CMAKE_SYSTEM_NAME MATCHES "Darwin") - add_compile_definitions(_DARWIN_C_SOURCE) -endif() -if (CMAKE_SYSTEM_NAME MATCHES "DragonFly") - add_compile_definitions(_DARWIN_C_SOURCE) -endif() - -# alloca is a non-standard interface that is not visible on BSDs when -# POSIX conformance is specified, but not all of them provide a clean way -# to enable it in such cases -if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") - add_compile_definitions(__BSD_VISIBLE) -endif() -if (CMAKE_SYSTEM_NAME MATCHES "NetBSD") - add_compile_definitions(_NETBSD_SOURCE) -endif() -if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") - add_compile_definitions(_BSD_SOURCE) -endif() - -if (WHISPER_PERF) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_PERF) -endif() - -# -# whisper.coreml - Core ML support -# - -if (WHISPER_COREML) - set(TARGET whisper.coreml) - - add_library(${TARGET} - coreml/whisper-encoder.h - coreml/whisper-encoder.mm - coreml/whisper-encoder-impl.h - coreml/whisper-encoder-impl.m - ) - - include(DefaultTargetOptions) - - target_include_directories(${TARGET} PUBLIC - . - ) - - target_link_libraries(${TARGET} PRIVATE ${FOUNDATION_FRAMEWORK} ${COREML_FRAMEWORK}) - - set_target_properties(${TARGET} PROPERTIES - COMPILE_FLAGS "-fobjc-arc" - ) - set_target_properties(${TARGET} PROPERTIES FOLDER "libs") -endif() - -if (WHISPER_OPENVINO) - set(TARGET whisper.openvino) - - add_library(${TARGET} OBJECT - openvino/whisper-openvino-encoder.h - openvino/whisper-openvino-encoder.cpp - ) - - target_include_directories(${TARGET} PUBLIC - . 
- ) +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) - set_property(TARGET ${TARGET} PROPERTY POSITION_INDEPENDENT_CODE ON) - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_OPENVINO) +set(WHISPER_BUILD_NUMBER ${BUILD_NUMBER}) +set(WHISPER_BUILD_COMMIT ${BUILD_COMMIT}) +set(WHISPER_INSTALL_VERSION ${CMAKE_PROJECT_VERSION}) - target_link_libraries(${TARGET} PRIVATE openvino::runtime) - set_target_properties(${TARGET} PROPERTIES FOLDER "libs") -endif() +set(WHISPER_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") +set(WHISPER_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") +set(WHISPER_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") -# -# whisper - this is the main library of the project -# +get_directory_property(WHISPER_TRANSIENT_DEFINES COMPILE_DEFINITIONS) -set(TARGET whisper) - -add_library(${TARGET} - ggml.h - ggml.c - ggml-alloc.h - ggml-alloc.c - ggml-backend.h - ggml-backend.c - ggml-quants.h - ggml-quants.c - ${GGML_SOURCES_METAL} - ${GGML_SOURCES_CUDA} - ${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL} - ${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM} - ${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS} - whisper.h - whisper.cpp - ) +set_target_properties(whisper PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/whisper.h) +install(TARGETS whisper LIBRARY PUBLIC_HEADER) -if (WHISPER_CUDA) - target_sources(${TARGET} PRIVATE whisper-mel-cuda.cu) -endif() - -include_directories ( - . -) -# Set the version numbers -set_target_properties(whisper PROPERTIES - VERSION ${PROJECT_VERSION} - SOVERSION ${SOVERSION} +target_compile_definitions(whisper PRIVATE + WHISPER_VERSION="${PROJECT_VERSION}" ) -include(DefaultTargetOptions) - -target_include_directories(${TARGET} PUBLIC - . 
- ) - -if (WHISPER_COREML) - target_link_libraries(${TARGET} PRIVATE whisper.coreml) -endif() - -if (WHISPER_OPENVINO) - target_link_libraries(${TARGET} PRIVATE whisper.openvino) -endif() - -if (WHISPER_MKL) - target_link_libraries(${TARGET} PUBLIC MKL::MKL) -endif() - -if (MSVC) - target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT}) - - set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -D_CRT_SECURE_NO_WARNINGS) -else() - target_link_libraries(${TARGET} PRIVATE m ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT}) -endif() - -if (BUILD_SHARED_LIBS) - set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) - target_link_libraries(${TARGET} PUBLIC - ${CMAKE_DL_LIBS} - ) - - target_compile_definitions(${TARGET} PUBLIC - WHISPER_SHARED - GGML_SHARED - ) - - target_compile_definitions(${TARGET} PRIVATE - WHISPER_BUILD - GGML_BUILD - ) - - if (WHISPER_METAL) - # TODO: I think this should make ggml-metal.m "see" the ggml-metal.metal file from the "bin" directory - # but for some reason it does not work here like it does in llama.cpp - set_target_properties(${TARGET} PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") - endif() -endif() - -if (GGML_SOURCES_CUDA) - message(STATUS "GGML CUDA sources found, configuring CUDA architecture") - # Only configure gmml CUDA architectures is not globally set - if (NOT DEFINED GGML_CUDA_ARCHITECTURES) - # Not overriden by user, so set defaults - set(GGML_CUDA_ARCHITECTURES 52 61 70) - endif() - message(STATUS "GGML Configuring CUDA architectures ${GGML_CUDA_ARCHITECTURES}") - set_property(TARGET whisper PROPERTY CUDA_ARCHITECTURES ${GGML_CUDA_ARCHITECTURES}) - set_property(TARGET whisper PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto") -endif() - -if (EMSCRIPTEN) - set_target_properties(${TARGET} PROPERTIES COMPILE_FLAGS "-msimd128") -endif() - -target_compile_definitions(${TARGET} PUBLIC - ${WHISPER_EXTRA_FLAGS} - ) - -set_target_properties(${TARGET} PROPERTIES PUBLIC_HEADER "ggml.h;whisper.h") -set_target_properties(${TARGET} PROPERTIES FOLDER "libs") +configure_package_config_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/whisper-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper + PATH_VARS + WHISPER_INCLUDE_INSTALL_DIR + WHISPER_LIB_INSTALL_DIR + WHISPER_BIN_INSTALL_DIR ) -include(GNUInstallDirs) +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake + VERSION ${WHISPER_INSTALL_VERSION} + COMPATIBILITY SameMajorVersion) -install(TARGETS ${TARGET} - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib/static - RUNTIME DESTINATION bin - RESOURCE DESTINATION bin - PUBLIC_HEADER DESTINATION include - ) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper) -# -# bindings -# +configure_file(cmake/whisper.pc.in + "${CMAKE_CURRENT_BINARY_DIR}/whisper.pc" + @ONLY) -add_subdirectory(bindings) +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/whisper.pc" + DESTINATION lib/pkgconfig) # # programs, examples and tests # if (WHISPER_BUILD_TESTS AND NOT CMAKE_JS_VERSION) - enable_testing() + include(CTest) add_subdirectory(tests) endif () if (WHISPER_BUILD_EXAMPLES) add_subdirectory(examples) endif() + +if (MSVC) + set(MSVC_WARNING_FLAGS + /wd4101 # Unreferenced local variable + /wd4005 # Macro redefinition + /wd4065 # switch statement contains 'default' but no 'case' labels + /wd4267 # Conversion from 
'size_t' to a smaller type, possible loss of data + /wd4244 # Conversion from one type to another type, possible loss of data + /wd4805 # Unsafe mix of type + /wd4305 # Truncation from 'type1' to 'type2' (often double to float) + /wd4996 # Function or variable may be unsafe/deprecated + ) + function(disable_msvc_warnings target_name) + if(TARGET ${target_name}) + target_compile_options(${target_name} PRIVATE ${MSVC_WARNING_FLAGS}) + endif() + endfunction() + + if (WHISPER_BUILD_EXAMPLES) + disable_msvc_warnings(whisper) + disable_msvc_warnings(common) + disable_msvc_warnings(common-sdl) + disable_msvc_warnings(lsp) + disable_msvc_warnings(wchess-core) + disable_msvc_warnings(whisper-command) + disable_msvc_warnings(whisper-cli) + disable_msvc_warnings(whisper-server) + disable_msvc_warnings(whisper-stream) + disable_msvc_warnings(whisper-talk-llama) + disable_msvc_warnings(whisper-bench) + disable_msvc_warnings(quantize) + disable_msvc_warnings(vad-speech-segments) + endif() +endif() diff --git a/Makefile b/Makefile index adcbdbfe80d..97a26d48f92 100644 --- a/Makefile +++ b/Makefile @@ -1,514 +1,12 @@ -default: main bench quantize server - -ifndef UNAME_S -UNAME_S := $(shell uname -s) -endif - -ifndef UNAME_P -UNAME_P := $(shell uname -p) -endif - -ifndef UNAME_M -UNAME_M := $(shell uname -m) -endif - -ifndef NVCC_VERSION - ifeq ($(call,$(shell which nvcc))$(.SHELLSTATUS),0) - NVCC_VERSION := $(shell nvcc --version | egrep -o "V[0-9]+.[0-9]+.[0-9]+" | cut -c2-) - endif -endif - -# In GNU make default CXX is g++ instead of c++. Let's fix that so that users -# of non-gcc compilers don't have to provide g++ alias or wrapper. -DEFCC := cc -DEFCXX := c++ -ifeq ($(origin CC),default) -CC := $(DEFCC) -endif -ifeq ($(origin CXX),default) -CXX := $(DEFCXX) -endif - -CCV := $(shell $(CC) --version | head -n 1) -CXXV := $(shell $(CXX) --version | head -n 1) - -# Mac OS + Arm can report x86_64 -# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789 -ifeq ($(UNAME_S),Darwin) - WHISPER_NO_OPENMP := 1 - - ifneq ($(UNAME_P),arm) - SYSCTL_M := $(shell sysctl -n hw.optional.arm64) - ifeq ($(SYSCTL_M),1) - # UNAME_P := arm - # UNAME_M := arm64 - warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789) - endif - endif -endif - -# -# Compile flags -# - -CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC -CXXFLAGS = -I.
-I./examples -O3 -DNDEBUG -std=c++11 -fPIC -LDFLAGS = - -ifdef MACOSX_DEPLOYMENT_TARGET - CFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET) - CXXFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET) - LDFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET) -endif - -# clock_gettime came in POSIX.1b (1993) -# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional -# posix_memalign came in POSIX.1-2001 / SUSv3 -# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985) -CFLAGS += -D_XOPEN_SOURCE=600 -CXXFLAGS += -D_XOPEN_SOURCE=600 - -# Somehow in OpenBSD whenever POSIX conformance is specified -# some string functions rely on locale_t availability, -# which was introduced in POSIX.1-2008, forcing us to go higher -ifeq ($(UNAME_S),OpenBSD) - CFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700 - CXXFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700 -endif - -# Data types, macros and functions related to controlling CPU affinity -# are available on Linux through GNU extensions in libc -ifeq ($(UNAME_S),Linux) - CFLAGS += -D_GNU_SOURCE - CXXFLAGS += -D_GNU_SOURCE -endif - -# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, -# and on macOS its availability depends on enabling Darwin extensions -# similarly on DragonFly, enabling BSD extensions is necessary -ifeq ($(UNAME_S),Darwin) - CFLAGS += -D_DARWIN_C_SOURCE - CXXFLAGS += -D_DARWIN_C_SOURCE -endif -ifeq ($(UNAME_S),DragonFly) - CFLAGS += -D__BSD_VISIBLE - CXXFLAGS += -D__BSD_VISIBLE -endif - -# alloca is a non-standard interface that is not visible on BSDs when -# POSIX conformance is specified, but not all of them provide a clean way -# to enable it in such cases -ifeq ($(UNAME_S),FreeBSD) - CFLAGS += -D__BSD_VISIBLE - CXXFLAGS += -D__BSD_VISIBLE -endif -ifeq ($(UNAME_S),NetBSD) - CFLAGS += -D_NETBSD_SOURCE - CXXFLAGS += -D_NETBSD_SOURCE -endif -ifeq ($(UNAME_S),OpenBSD) - CFLAGS += -D_BSD_SOURCE - CXXFLAGS += -D_BSD_SOURCE -endif - -# OS specific -# TODO: support Windows -ifeq ($(filter $(UNAME_S),Linux Darwin DragonFly FreeBSD NetBSD OpenBSD Haiku),$(UNAME_S)) - CFLAGS += -pthread - CXXFLAGS += -pthread -endif - -# detect Windows -ifneq ($(findstring _NT,$(UNAME_S)),) - _WIN32 := 1 -endif - -# Windows Sockets 2 (Winsock) for network-capable apps -ifeq ($(_WIN32),1) - LWINSOCK2 := -lws2_32 -endif - -# Architecture specific -# TODO: probably these flags need to be tweaked on some architectures -# feel free to update the Makefile for your architecture and send a pull request or issue -ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) - ifeq ($(UNAME_S),Darwin) - CPUINFO_CMD := sysctl machdep.cpu.features machdep.cpu.leaf7_features - else ifeq ($(UNAME_S),Linux) - CPUINFO_CMD := cat /proc/cpuinfo - else ifneq (,$(filter MINGW32_NT% MINGW64_NT% MSYS_NT%,$(UNAME_S))) - CPUINFO_CMD := cat /proc/cpuinfo - else ifneq (,$(filter DragonFly FreeBSD,$(UNAME_S))) - CPUINFO_CMD := grep Features /var/run/dmesg.boot - else ifeq ($(UNAME_S),Haiku) - CPUINFO_CMD := sysinfo -cpu - endif - - # x86 ISA extensions (chronological order) - ifdef CPUINFO_CMD - SSE3_M := $(shell $(CPUINFO_CMD) | grep -iwE 'PNI|SSE3') - SSSE3_M := $(shell $(CPUINFO_CMD) | grep -iw 'SSSE3') - AVX_M := $(shell $(CPUINFO_CMD) | grep -iwE 'AVX|AVX1.0') - F16C_M := $(shell $(CPUINFO_CMD) | grep -iw 'F16C') - FMA_M := $(shell $(CPUINFO_CMD) | grep -iw 'FMA') - AVX2_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX2') - AVX512F_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512F') - AVX512VBMI_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512VBMI') - AVX512VNNI_M 
:= $(shell $(CPUINFO_CMD) | grep -iwE 'AVX512_VNNI|AVX512VNNI') - - # AVX-512 has many subsets, so let's make it easy to disable them all - ifneq ($(filter-out 0,$(WHISPER_NO_AVX512)),) - AVX512F_M := - AVX512VBMI_M := - AVX512VNNI_M := - endif - - ifneq (,$(SSE3_M)) - CFLAGS += -msse3 - CXXFLAGS += -msse3 - endif - - ifneq (,$(SSSE3_M)) - CFLAGS += -mssse3 - CXXFLAGS += -mssse3 - endif - - ifneq (,$(AVX_M)) - CFLAGS += -mavx - CXXFLAGS += -mavx - endif - - ifneq (,$(F16C_M)) - CFLAGS += -mf16c - CXXFLAGS += -mf16c - endif - - ifneq (,$(FMA_M)) - CFLAGS += -mfma - CXXFLAGS += -mfma - endif - - ifneq (,$(AVX2_M)) - CFLAGS += -mavx2 - CXXFLAGS += -mavx2 - endif - - ifneq (,$(AVX512F_M)) - CFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw - CXXFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw - endif - - ifneq (,$(AVX512VBMI_M)) - CFLAGS += -mavx512vbmi - CXXFLAGS += -mavx512vbmi - endif - - ifneq (,$(AVX512VNNI_M)) - CFLAGS += -mavx512vnni - CXXFLAGS += -mavx512vnni - endif - endif -endif - -ifneq ($(filter ppc64%,$(UNAME_M)),) - POWER9_M := $(shell grep "POWER9" /proc/cpuinfo) - ifneq (,$(findstring POWER9,$(POWER9_M))) - CFLAGS += -mpower9-vector - endif - # Require c++23's std::byteswap for big-endian support. - ifeq ($(UNAME_M),ppc64) - CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN - endif -endif - -ifndef WHISPER_NO_ACCELERATE - # Mac M1 - include Accelerate framework - ifeq ($(UNAME_S),Darwin) - CFLAGS += -DGGML_USE_ACCELERATE -DGGML_USE_BLAS - CFLAGS += -DACCELERATE_NEW_LAPACK - CFLAGS += -DACCELERATE_LAPACK_ILP64 - CXXFLAGS += -DGGML_USE_ACCELERATE -DGGML_USE_BLAS - CXXFLAGS += -DACCELERATE_NEW_LAPACK - CXXFLAGS += -DACCELERATE_LAPACK_ILP64 - LDFLAGS += -framework Accelerate - WHISPER_OBJ += ggml-blas.o - endif -endif - -ifdef WHISPER_COREML - CXXFLAGS += -DWHISPER_USE_COREML - LDFLAGS += -framework Foundation -framework CoreML - -ifdef WHISPER_COREML_ALLOW_FALLBACK - CXXFLAGS += -DWHISPER_COREML_ALLOW_FALLBACK -endif -endif - -ifndef WHISPER_NO_METAL - ifeq ($(UNAME_S),Darwin) - WHISPER_METAL := 1 - - CFLAGS += -DGGML_USE_METAL - CXXFLAGS += -DGGML_USE_METAL - LDFLAGS += -framework Foundation -framework Metal -framework MetalKit - endif -endif - -ifndef WHISPER_NO_OPENMP - CXXFLAGS += -DGGML_USE_OPENMP - CFLAGS += -fopenmp - CXXFLAGS += -fopenmp -endif # WHISPER_NO_OPENMP - -ifdef WHISPER_OPENBLAS - CXXFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas) - CFLAGS += $(shell pkg-config --cflags-only-other openblas) - LDFLAGS += $(shell pkg-config --libs openblas) - WHISPER_OBJ += ggml-blas.o -endif # WHISPER_OPENBLAS - -ifdef WHISPER_OPENBLAS64 - CXXFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64) - CFLAGS += $(shell pkg-config --cflags-only-other openblas64) - LDFLAGS += $(shell pkg-config --libs openblas64) - WHISPER_OBJ += ggml-blas.o -endif # WHISPER_OPENBLAS64 - -ifdef WHISPER_BLIS - CXXFLAGS += -DGGML_USE_BLAS -I/usr/local/include/blis -I/usr/include/blis - LDFLAGS += -lblis -L/usr/local/lib - WHISPER_OBJ += ggml-blas.o -endif # WHISPER_BLIS - -ifdef WHISPER_CUBLAS -# WHISPER_CUBLAS is deprecated and will be removed in the future - WHISPER_CUDA := 1 -endif - -OBJS_CUDA_TEMP_INST = $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-wmma*.cu)) -OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/mmq*.cu)) -ifdef WHISPER_CUDA_FA_ALL_QUANTS - OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*.cu)) -else - OBJS_CUDA_TEMP_INST += 
$(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu)) - OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu)) - OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*f16-f16.cu)) -endif # WHISPER_CUDA_FA_ALL_QUANTS - -ifdef WHISPER_CUDA - ifeq ($(shell expr $(NVCC_VERSION) \>= 11.6), 1) - CUDA_ARCH_FLAG ?= native - else - CUDA_ARCH_FLAG ?= all - endif - - CFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include - CXXFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS - LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lcufft -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib - WHISPER_OBJ += ggml-cuda.o whisper-mel-cuda.o - WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu)) - WHISPER_OBJ += $(OBJS_CUDA_TEMP_INST) - NVCC = nvcc - NVCCFLAGS = --forward-unknown-to-host-compiler -arch=$(CUDA_ARCH_FLAG) - -ggml-cuda/%.o: ggml-cuda/%.cu ggml.h ggml-common.h ggml-cuda/common.cuh - $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@ - -ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh) - $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@ - -whisper-mel-cuda.o: whisper-mel-cuda.cu whisper.h ggml.h ggml-backend.h whisper-mel.hpp whisper-mel-cuda.hpp - $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@ -endif - -ifdef WHISPER_HIPBLAS - ROCM_PATH ?= /opt/rocm - HIPCC ?= $(ROCM_PATH)/bin/hipcc - GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch) - CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA - CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA - LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib - LDFLAGS += -lhipblas -lamdhip64 -lrocblas - HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) - WHISPER_OBJ += ggml-cuda.o - WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu)) - WHISPER_OBJ += $(OBJS_CUDA_TEMP_INST) - -ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh - $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< - -ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh) - $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< -endif - -ifdef WHISPER_GPROF - CFLAGS += -pg - CXXFLAGS += -pg -endif - -ifneq ($(filter aarch64%,$(UNAME_M)),) - CFLAGS += -mcpu=native - CXXFLAGS += -mcpu=native -endif - -ifneq ($(filter armv6%,$(UNAME_M)),) - # 32-bit Raspberry Pi 1, 2, 3 - CFLAGS += -mfpu=neon -mfp16-format=ieee -mno-unaligned-access -endif - -ifneq ($(filter armv7%,$(UNAME_M)),) - # 32-bit ARM, for example on Armbian or possibly raspbian - #CFLAGS += -mfpu=neon -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access - #CXXFLAGS += -mfpu=neon -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access - - # 64-bit ARM on 32-bit OS, use these (TODO: auto-detect 64-bit) - CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access - CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access -endif - -ifneq ($(filter armv8%,$(UNAME_M)),) - # Raspberry Pi 4 - CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations 
-mno-unaligned-access - CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access -endif - -# -# Print build information -# - -$(info I whisper.cpp build info: ) -$(info I UNAME_S: $(UNAME_S)) -$(info I UNAME_P: $(UNAME_P)) -$(info I UNAME_M: $(UNAME_M)) -$(info I CFLAGS: $(CFLAGS)) -$(info I CXXFLAGS: $(CXXFLAGS)) -$(info I LDFLAGS: $(LDFLAGS)) -$(info I CC: $(CCV)) -$(info I CXX: $(CXXV)) -$(info ) - -ifdef WHISPER_CUBLAS -$(info !!!!) -$(info WHISPER_CUBLAS is deprecated and will be removed in the future. Use WHISPER_CUDA instead.) -$(info !!!!) -$(info ) -endif - -# -# Build library -# - -ggml.o: ggml.c ggml.h ggml-cuda.h - $(CC) $(CFLAGS) -c $< -o $@ - -ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h - $(CC) $(CFLAGS) -c $< -o $@ - -ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h - $(CC) $(CFLAGS) -c $< -o $@ - -ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h - $(CC) $(CFLAGS) -c $< -o $@ - -ggml-blas.o: ggml-blas.cpp ggml-blas.h - $(CXX) $(CXXFLAGS) -c $< -o $@ - -WHISPER_OBJ += ggml.o ggml-alloc.o ggml-backend.o ggml-quants.o - -whisper.o: whisper.cpp whisper.h whisper-mel.hpp ggml.h ggml-cuda.h - $(CXX) $(CXXFLAGS) -c $< -o $@ - -ifndef WHISPER_COREML -WHISPER_OBJ += whisper.o -else -whisper-encoder.o: coreml/whisper-encoder.mm coreml/whisper-encoder.h - $(CXX) -O3 -I . -fobjc-arc -c coreml/whisper-encoder.mm -o whisper-encoder.o - -whisper-encoder-impl.o: coreml/whisper-encoder-impl.m coreml/whisper-encoder-impl.h - $(CXX) -O3 -I . -fobjc-arc -c coreml/whisper-encoder-impl.m -o whisper-encoder-impl.o - -WHISPER_OBJ += whisper.o whisper-encoder.o whisper-encoder-impl.o -endif - -ifdef WHISPER_METAL -ggml-metal.o: ggml-metal.m ggml-metal.h - $(CC) $(CFLAGS) -c $< -o $@ - -WHISPER_OBJ += ggml-metal.o - -ifdef WHISPER_METAL_EMBED_LIBRARY -CFLAGS += -DGGML_METAL_EMBED_LIBRARY - -ggml-metal-embed.o: ggml-metal.metal ggml-common.h - @echo "Embedding Metal library" - $(eval TEMP_ASSEMBLY=$(shell mktemp)) - $(eval TEMP_METALLIB=$(shell mktemp)) - @sed "/^#include \"ggml-common.h\"/{r ggml-common.h"$$'\n'"d;}" ggml-metal.metal > $(TEMP_METALLIB) - @echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY) - @echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY) - @echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY) - @echo ".incbin \"$(TEMP_METALLIB)\"" >> $(TEMP_ASSEMBLY) - @echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY) - @echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY) - @$(AS) $(TEMP_ASSEMBLY) -o $@ - @rm -f $(TEMP_ASSEMBLY) $(TEMP_METALLIB) - -WHISPER_OBJ += ggml-metal-embed.o -endif -endif - -libwhisper.a: $(WHISPER_OBJ) - $(AR) rcs libwhisper.a $(WHISPER_OBJ) - -libwhisper.so: $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) -shared -o libwhisper.so $(WHISPER_OBJ) $(LDFLAGS) - -clean: - rm -f *.o main stream command talk talk-llama bench quantize server lsp libwhisper.a libwhisper.so - rm -vrf ggml-cuda/*.o - rm -vrf ggml-cuda/template-instances/*.o - -# -# Examples -# - -CC_SDL=`sdl2-config --cflags --libs` - -SRC_COMMON = examples/common.cpp examples/common-ggml.cpp examples/grammar-parser.cpp -SRC_COMMON_SDL = examples/common-sdl.cpp - -main: examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o main $(LDFLAGS) - ./main -h - -bench: examples/bench/bench.cpp $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/bench/bench.cpp $(WHISPER_OBJ) -o bench $(LDFLAGS) - -quantize: examples/quantize/quantize.cpp $(WHISPER_OBJ) $(SRC_COMMON) - $(CXX) $(CXXFLAGS) 
examples/quantize/quantize.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o quantize $(LDFLAGS) - -server: examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o server $(LDFLAGS) $(LWINSOCK2) - -stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o stream $(CC_SDL) $(LDFLAGS) - -command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o command $(CC_SDL) $(LDFLAGS) - -lsp: examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o lsp $(CC_SDL) $(LDFLAGS) - -talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk $(CC_SDL) $(LDFLAGS) - -talk-llama: examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) - $(CXX) $(CXXFLAGS) examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk-llama $(CC_SDL) $(LDFLAGS) - # # Audio samples # +.PHONY: build +build: + cmake -B build $(CMAKE_ARGS) + cmake --build build --config Release + # download a few audio samples into folder "./samples": .PHONY: samples samples: @@ -520,17 +18,6 @@ samples: @wget --quiet --show-progress -O samples/mm1.wav https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav @wget --quiet --show-progress -O samples/a13.mp3 https://upload.wikimedia.org/wikipedia/commons/transcoded/6/6f/Apollo13-wehaveaproblem.ogg/Apollo13-wehaveaproblem.ogg.mp3 @wget --quiet --show-progress -O samples/diffusion2023-07-03.flac https://archive.org/download/diffusion2023-07-03/diffusion2023-07-03.flac - @echo "Converting to 16-bit WAV ..." - @ffmpeg -loglevel -0 -y -i samples/gb0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb0.wav - @ffmpeg -loglevel -0 -y -i samples/gb1.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb1.wav - @ffmpeg -loglevel -0 -y -i samples/hp0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/hp0.wav - @rm samples/*.ogg - @ffmpeg -loglevel -0 -y -i samples/mm1.wav -ar 16000 -ac 1 -c:a pcm_s16le samples/mm0.wav - @rm samples/mm1.wav - @ffmpeg -loglevel -0 -y -i samples/a13.mp3 -ar 16000 -ac 1 -c:a pcm_s16le -ss 00:00:00 -to 00:00:30 samples/a13.wav - @rm samples/a13.mp3 - @ffmpeg -loglevel -0 -y -i samples/diffusion2023-07-03.flac -ar 16000 -ac 1 -c:a pcm_s16le samples/diffusion2023-07-03.wav - @rm samples/diffusion2023-07-03.flac # # Models @@ -550,27 +37,22 @@ samples: .PHONY: large-v1 .PHONY: large-v2 .PHONY: large-v3 +.PHONY: large-v3-turbo -tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 large-v3: main +tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 large-v3 large-v3-turbo: bash ./models/download-ggml-model.sh $@ + cmake -B build $(CMAKE_ARGS) + cmake --build build --config Release @echo "" @echo "===============================================" @echo "Running $@ on all samples in ./samples ..." 
@echo "===============================================" @echo "" - @for f in samples/*.wav; do \ + @for f in samples/*.{flac,mp3,ogg,wav}; do \ echo "----------------------------------------------" ; \ echo "[+] Running $@ on $$f ... (run 'ffplay $$f' to listen)" ; \ - echo "----------------------------------------------" ; \ + echo "----------------------------------------------" ; \ echo "" ; \ - ./main -m models/ggml-$@.bin -f $$f ; \ + ./build/bin/whisper-cli -m models/ggml-$@.bin -f $$f ; \ echo "" ; \ done - -# -# Tests -# - -.PHONY: tests -tests: - bash ./tests/run-tests.sh $(word 2, $(MAKECMDGOALS)) diff --git a/Package.swift b/Package.swift deleted file mode 100644 index bbb7fb03b99..00000000000 --- a/Package.swift +++ /dev/null @@ -1,61 +0,0 @@ -// swift-tools-version:5.5 - -import PackageDescription - -let package = Package( - name: "whisper", - platforms: [ - .macOS(.v12), - .iOS(.v14), - .watchOS(.v4), - .tvOS(.v14) - ], - products: [ - .library(name: "whisper", targets: ["whisper"]), - ], - targets: [ - .target( - name: "whisper", - path: ".", - exclude: [ - "bindings", - "cmake", - "coreml", - "examples", - "extra", - "models", - "samples", - "tests", - "CMakeLists.txt", - "ggml-cuda.cu", - "ggml-cuda.h", - "Makefile" - ], - sources: [ - "ggml.c", - "whisper.cpp", - "ggml-alloc.c", - "ggml-backend.c", - "ggml-quants.c", - "ggml-metal.m" - ], - resources: [.process("ggml-metal.metal")], - publicHeadersPath: "spm-headers", - cSettings: [ - .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]), - .define("GGML_USE_ACCELERATE"), - .unsafeFlags(["-fno-objc-arc"]), - .define("GGML_USE_METAL") - // NOTE: NEW_LAPACK will required iOS version 16.4+ - // We should consider add this in the future when we drop support for iOS 14 - // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc) - // .define("ACCELERATE_NEW_LAPACK"), - // .define("ACCELERATE_LAPACK_ILP64") - ], - linkerSettings: [ - .linkedFramework("Accelerate") - ] - ) - ], - cxxLanguageStandard: .cxx11 -) diff --git a/README.md b/README.md index 289869d861c..f197c93401d 100644 --- a/README.md +++ b/README.md @@ -2,26 +2,30 @@ ![whisper.cpp](https://user-images.githubusercontent.com/1991296/235238348-05d0f6a4-da44-4900-a1de-d0707e75b763.jpeg) -[![Actions Status](https://github.com/ggerganov/whisper.cpp/workflows/CI/badge.svg)](https://github.com/ggerganov/whisper.cpp/actions) +[![Actions Status](https://github.com/ggml-org/whisper.cpp/workflows/CI/badge.svg)](https://github.com/ggml-org/whisper.cpp/actions) [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Conan Center](https://shields.io/conan/v/whisper-cpp)](https://conan.io/center/whisper-cpp) [![npm](https://img.shields.io/npm/v/whisper.cpp.svg)](https://www.npmjs.com/package/whisper.cpp/) -Stable: [v1.6.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.6.0) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126) +Stable: [v1.8.1](https://github.com/ggml-org/whisper.cpp/releases/tag/v1.8.1) / [Roadmap](https://github.com/orgs/ggml-org/projects/4/) High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model: - Plain C/C++ implementation without dependencies -- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](https://github.com/ggerganov/whisper.cpp#core-ml-support) +- Apple Silicon first-class citizen - optimized via 
ARM NEON, Accelerate framework, Metal and [Core ML](#core-ml-support) - AVX intrinsics support for x86 architectures -- VSX intrinsics support for POWER architectures +- [VSX intrinsics support for POWER architectures](#power-vsx-intrinsics) - Mixed F16 / F32 precision -- [4-bit and 5-bit integer quantization support](https://github.com/ggerganov/whisper.cpp#quantization) +- [Integer quantization support](#quantization) - Zero memory allocations at runtime +- [Vulkan support](#vulkan-gpu-support) - Support for CPU-only inference -- [Efficient GPU support for NVIDIA](https://github.com/ggerganov/whisper.cpp#nvidia-gpu-support-via-cublas) -- [OpenVINO Support](https://github.com/ggerganov/whisper.cpp#openvino-support) -- [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/whisper.h) +- [Efficient GPU support for NVIDIA](#nvidia-gpu-support) +- [OpenVINO Support](#openvino-support) +- [Ascend NPU Support](#ascend-npu-support) +- [Moore Threads GPU Support](#moore-threads-gpu-support) +- [C-style API](https://github.com/ggml-org/whisper.cpp/blob/master/include/whisper.h) +- [Voice Activity Detection (VAD)](#voice-activity-detection-vad) Supported platforms: @@ -29,14 +33,14 @@ Supported platforms: - [x] [iOS](examples/whisper.objc) - [x] [Android](examples/whisper.android) - [x] [Java](bindings/java/README.md) -- [x] Linux / [FreeBSD](https://github.com/ggerganov/whisper.cpp/issues/56#issuecomment-1350920264) +- [x] Linux / [FreeBSD](https://github.com/ggml-org/whisper.cpp/issues/56#issuecomment-1350920264) - [x] [WebAssembly](examples/whisper.wasm) -- [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168)] -- [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166) -- [x] [docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp) +- [x] Windows ([MSVC](https://github.com/ggml-org/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggml-org/whisper.cpp/issues/168)) +- [x] [Raspberry Pi](https://github.com/ggml-org/whisper.cpp/discussions/166) +- [x] [Docker](https://github.com/ggml-org/whisper.cpp/pkgs/container/whisper.cpp) -The entire high-level implementation of the model is contained in [whisper.h](whisper.h) and [whisper.cpp](whisper.cpp). -The rest of the code is part of the [`ggml`](https://github.com/ggerganov/ggml) machine learning library. +The entire high-level implementation of the model is contained in [whisper.h](include/whisper.h) and [whisper.cpp](src/whisper.cpp). +The rest of the code is part of the [`ggml`](https://github.com/ggml-org/ggml) machine learning library. Having such a lightweight implementation of the model allows to easily integrate it in different platforms and applications. 
As an example, here is a video of running the model on an iPhone 13 device - fully offline, on-device: [whisper.objc](examples/whisper.objc) @@ -49,162 +53,48 @@ https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a On Apple Silicon, the inference runs fully on the GPU via Metal: -https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225 - -Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm) - -## Implementation details - -- The core tensor operations are implemented in C ([ggml.h](ggml.h) / [ggml.c](ggml.c)) -- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](whisper.h) / [whisper.cpp](whisper.cpp)) -- Sample usage is demonstrated in [main.cpp](examples/main) -- Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream) -- Various other examples are available in the [examples](examples) folder - -The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products. +https://github.com/ggml-org/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225 ## Quick start First clone the repository: ```bash -git clone https://github.com/ggerganov/whisper.cpp.git +git clone https://github.com/ggml-org/whisper.cpp.git +``` + +Navigate into the directory: + +``` +cd whisper.cpp ``` Then, download one of the Whisper [models](models/README.md) converted in [`ggml` format](#ggml-format). For example: ```bash -bash ./models/download-ggml-model.sh base.en +sh ./models/download-ggml-model.sh base.en ``` -Now build the [main](examples/main) example and transcribe an audio file like this: +Now build the [whisper-cli](examples/cli) example and transcribe an audio file like this: ```bash -# build the main example -make +# build the project +cmake -B build +cmake --build build -j --config Release # transcribe an audio file -./main -f samples/jfk.wav +./build/bin/whisper-cli -f samples/jfk.wav ``` --- -For a quick demo, simply run `make base.en`: - -```text -$ make base.en - -cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o -c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o -c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate -./main -h - -usage: ./main [options] file0.wav file1.wav ... 
- -options: - -h, --help [default] show this help message and exit - -t N, --threads N [4 ] number of threads to use during computation - -p N, --processors N [1 ] number of processors to use during computation - -ot N, --offset-t N [0 ] time offset in milliseconds - -on N, --offset-n N [0 ] segment index offset - -d N, --duration N [0 ] duration of audio to process in milliseconds - -mc N, --max-context N [-1 ] maximum number of text context tokens to store - -ml N, --max-len N [0 ] maximum segment length in characters - -sow, --split-on-word [false ] split on word rather than on token - -bo N, --best-of N [5 ] number of best candidates to keep - -bs N, --beam-size N [5 ] beam size for beam search - -wt N, --word-thold N [0.01 ] word timestamp probability threshold - -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail - -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail - -debug, --debug-mode [false ] enable debug mode (eg. dump log_mel) - -tr, --translate [false ] translate from source language to english - -di, --diarize [false ] stereo audio diarization - -tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model) - -nf, --no-fallback [false ] do not use temperature fallback while decoding - -otxt, --output-txt [false ] output result in a text file - -ovtt, --output-vtt [false ] output result in a vtt file - -osrt, --output-srt [false ] output result in a srt file - -olrc, --output-lrc [false ] output result in a lrc file - -owts, --output-words [false ] output script for generating karaoke video - -fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video - -ocsv, --output-csv [false ] output result in a CSV file - -oj, --output-json [false ] output result in a JSON file - -ojf, --output-json-full [false ] include more information in the JSON file - -of FNAME, --output-file FNAME [ ] output file path (without file extension) - -ps, --print-special [false ] print special tokens - -pc, --print-colors [false ] print colors - -pp, --print-progress [false ] print progress - -nt, --no-timestamps [false ] do not print timestamps - -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect) - -dl, --detect-language [false ] exit after automatically detecting language - --prompt PROMPT [ ] initial prompt - -m FNAME, --model FNAME [models/ggml-base.en.bin] model path - -f FNAME, --file FNAME [ ] input WAV file path - -oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference - -ls, --log-score [false ] log best decoder scores of tokens - -ng, --no-gpu [false ] disable GPU - - -bash ./models/download-ggml-model.sh base.en -Downloading ggml model base.en ... -ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s -Done! Model 'base.en' saved in 'models/ggml-base.en.bin' -You can now use it like this: - - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav - - -=============================================== -Running base.en on all samples in ./samples ... -=============================================== - ----------------------------------------------- -[+] Running base.en on samples/jfk.wav ... 
(run 'ffplay samples/jfk.wav' to listen) ----------------------------------------------- - -whisper_init_from_file: loading model from 'models/ggml-base.en.bin' -whisper_model_load: loading model -whisper_model_load: n_vocab = 51864 -whisper_model_load: n_audio_ctx = 1500 -whisper_model_load: n_audio_state = 512 -whisper_model_load: n_audio_head = 8 -whisper_model_load: n_audio_layer = 6 -whisper_model_load: n_text_ctx = 448 -whisper_model_load: n_text_state = 512 -whisper_model_load: n_text_head = 8 -whisper_model_load: n_text_layer = 6 -whisper_model_load: n_mels = 80 -whisper_model_load: f16 = 1 -whisper_model_load: type = 2 -whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder) -whisper_model_load: kv self size = 5.25 MB -whisper_model_load: kv cross size = 17.58 MB -whisper_model_load: adding 1607 extra tokens -whisper_model_load: model ctx = 140.60 MB -whisper_model_load: model size = 140.54 MB - -system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | - -main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ... - - -[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country. - - -whisper_print_timings: fallbacks = 0 p / 0 h -whisper_print_timings: load time = 113.81 ms -whisper_print_timings: mel time = 15.40 ms -whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run) -whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run) -whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run) -whisper_print_timings: total time = 476.31 ms -``` +For a quick demo, simply run `make base.en`. The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`. -For detailed usage instructions, run: `./main -h` +For detailed usage instructions, run: `./build/bin/whisper-cli -h` -Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool. +Note that the [whisper-cli](examples/cli) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool. For example, you can use `ffmpeg` like this: ```bash @@ -216,7 +106,7 @@ ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav If you want some extra audio samples to play with, simply run: ``` -make samples +make -j samples ``` This will download a few more audio files from Wikipedia and convert them to 16-bit WAV format via `ffmpeg`. 
@@ -224,17 +114,18 @@ This will download a few more audio files from Wikipedia and convert them to 16- You can download and run the other models as follows: ``` -make tiny.en -make tiny -make base.en -make base -make small.en -make small -make medium.en -make medium -make large-v1 -make large-v2 -make large-v3 +make -j tiny.en +make -j tiny +make -j base.en +make -j base +make -j small.en +make -j small +make -j medium.en +make -j medium +make -j large-v1 +make -j large-v2 +make -j large-v3 +make -j large-v3-turbo ``` ## Memory usage @@ -247,6 +138,21 @@ make large-v3 | medium | 1.5 GiB | ~2.1 GB | | large | 2.9 GiB | ~3.9 GB | +## POWER VSX Intrinsics + +`whisper.cpp` supports POWER architectures and includes code which +significantly speeds operation on Linux running on POWER9/10, making it +capable of faster-than-realtime transcription on underclocked Raptor +Talos II. Ensure you have a BLAS package installed, and replace the +standard cmake setup with: + +```bash +# build with GGML_BLAS defined +cmake -B build -DGGML_BLAS=1 +cmake --build build -j --config Release +./build/bin/whisper-cli [ .. etc .. ] +``` + ## Quantization `whisper.cpp` supports integer quantization of the Whisper `ggml` models. @@ -256,11 +162,12 @@ Here are the steps for creating and using a quantized model: ```bash # quantize a model with Q5_0 method -make quantize -./quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0 +cmake -B build +cmake --build build -j --config Release +./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0 # run the examples as usual, specifying the quantized model file -./main -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav +./build/bin/whisper-cli -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav ``` ## Core ML support @@ -277,11 +184,11 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in ``` - To ensure `coremltools` operates correctly, please confirm that [Xcode](https://developer.apple.com/xcode/) is installed and execute `xcode-select --install` to install the command-line tools. - - Python 3.10 is recommended. + - Python 3.11 is recommended. - MacOS Sonoma (version 14) or newer is recommended, as older versions of MacOS might experience issues with transcription hallucination. - [OPTIONAL] It is recommended to utilize a Python version management system, such as [Miniconda](https://docs.conda.io/en/latest/miniconda.html) for this step: - - To create an environment, use: `conda create -n py310-whisper python=3.10 -y` - - To activate the environment, use: `conda activate py310-whisper` + - To create an environment, use: `conda create -n py311-whisper python=3.11 -y` + - To activate the environment, use: `conda activate py311-whisper` - Generate a Core ML model. For example, to generate a `base.en` model, use: @@ -294,10 +201,6 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in - Build `whisper.cpp` with Core ML support: ```bash - # using Makefile - make clean - WHISPER_COREML=1 make -j - # using CMake cmake -B build -DWHISPER_COREML=1 cmake --build build -j --config Release @@ -306,7 +209,7 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in - Run the examples as usual. For example: ```text - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav + $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav ... @@ -322,7 +225,7 @@ speed-up - more than x3 faster compared with CPU-only execution. 
Here are the in The first run on a device is slow, since the ANE service compiles the Core ML model to some device-specific format. Next runs are faster. -For more information about the Core ML implementation please refer to PR [#566](https://github.com/ggerganov/whisper.cpp/pull/566). +For more information about the Core ML implementation please refer to PR [#566](https://github.com/ggml-org/whisper.cpp/pull/566). ## OpenVINO support @@ -364,7 +267,7 @@ This can result in significant speedup in encoder performance. Here are the inst - Build `whisper.cpp` with OpenVINO support: - Download OpenVINO package from [release page](https://github.com/openvinotoolkit/openvino/releases). The recommended version to use is [2023.0.0](https://github.com/openvinotoolkit/openvino/releases/tag/2023.0.0). + Download OpenVINO package from [release page](https://github.com/openvinotoolkit/openvino/releases). The recommended version to use is [2024.6.0](https://github.com/openvinotoolkit/openvino/releases/tag/2024.6.0). Ready to use Binaries of the required libraries can be found in the [OpenVino Archives](https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/) After downloading & extracting package onto your development system, set up required environment by sourcing setupvars script. For example: @@ -390,7 +293,7 @@ This can result in significant speedup in encoder performance. Here are the inst - Run the examples as usual. For example: ```text - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav + $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav ... @@ -407,7 +310,7 @@ This can result in significant speedup in encoder performance. Here are the inst The first time run on an OpenVINO device is slow, since the OpenVINO framework will compile the IR (Intermediate Representation) model to a device-specific 'blob'. This device-specific blob will get cached for the next run. -For more information about the Core ML implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037). +For more information about the OpenVINO implementation please refer to PR [#1037](https://github.com/ggml-org/whisper.cpp/pull/1037). ## NVIDIA GPU support @@ -417,8 +320,24 @@ First, make sure you have installed `cuda`: https://developer.nvidia.com/cuda-do Now build `whisper.cpp` with CUDA support: ``` -make clean -WHISPER_CUDA=1 make -j +cmake -B build -DGGML_CUDA=1 +cmake --build build -j --config Release +``` + +or for newer NVIDIA GPU's (RTX 5000 series): +``` +cmake -B build -DGGML_CUDA=1 -DCMAKE_CUDA_ARCHITECTURES="86" +cmake --build build -j --config Release +``` + +## Vulkan GPU support +Cross-vendor solution which allows you to accelerate workload on your GPU. +First, make sure your graphics card driver provides support for Vulkan API. + +Now build `whisper.cpp` with Vulkan support: +``` +cmake -B build -DGGML_VULKAN=1 +cmake --build build -j --config Release ``` ## BLAS CPU support via OpenBLAS @@ -429,23 +348,89 @@ First, make sure you have installed `openblas`: https://www.openblas.net/ Now build `whisper.cpp` with OpenBLAS support: ``` -make clean -WHISPER_OPENBLAS=1 make -j +cmake -B build -DGGML_BLAS=1 +cmake --build build -j --config Release +``` + +## Ascend NPU support + +Ascend NPU provides inference acceleration via [`CANN`](https://www.hiascend.com/en/software/cann) and AI cores. 
+ +First, check if your Ascend NPU device is supported: + +**Verified devices** +| Ascend NPU | Status | +|:-----------------------------:|:-------:| +| Atlas 300T A2 | Support | + +Then, make sure you have installed [`CANN toolkit`](https://www.hiascend.com/en/software/cann/community) . The lasted version of CANN is recommanded. + +Now build `whisper.cpp` with CANN support: + +``` +cmake -B build -DGGML_CANN=1 +cmake --build build -j --config Release +``` + +Run the inference examples as usual, for example: + +``` +./build/bin/whisper-cli -f samples/jfk.wav -m models/ggml-base.en.bin -t 8 +``` + +*Notes:* + +- If you have trouble with Ascend NPU device, please create a issue with **[CANN]** prefix/tag. +- If you run successfully with your Ascend NPU device, please help update the table `Verified devices`. + +## Moore Threads GPU support + +With Moore Threads cards the processing of the models is done efficiently on the GPU via muBLAS and custom MUSA kernels. +First, make sure you have installed `MUSA SDK rc4.2.0`: https://developer.mthreads.com/sdk/download/musa?equipment=&os=&driverVersion=&version=4.2.0 + +Now build `whisper.cpp` with MUSA support: + +``` +cmake -B build -DGGML_MUSA=1 +cmake --build build -j --config Release ``` -## BLAS CPU support via Intel MKL +or specify the architecture for your Moore Threads GPU. For example, if you have a MTT S80 GPU, you can specify the architecture as follows: -Encoder processing can be accelerated on the CPU via the BLAS compatible interface of Intel's Math Kernel Library. -First, make sure you have installed Intel's MKL runtime and development packages: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-download.html +``` +cmake -B build -DGGML_MUSA=1 -DMUSA_ARCHITECTURES="21" +cmake --build build -j --config Release +``` + +## FFmpeg support (Linux only) -Now build `whisper.cpp` with Intel MKL BLAS support: +If you want to support more audio formats (such as Opus and AAC), you can turn on the `WHISPER_FFMPEG` build flag to enable FFmpeg integration. + +First, you need to install required libraries: + +```bash +# Debian/Ubuntu +sudo apt install libavcodec-dev libavformat-dev libavutil-dev +# RHEL/Fedora +sudo dnf install libavcodec-free-devel libavformat-free-devel libavutil-free-devel ``` -source /opt/intel/oneapi/setvars.sh -mkdir build -cd build -cmake -DWHISPER_MKL=ON .. -WHISPER_MKL=1 make -j + +Then you can build the project as follows: + +```bash +cmake -B build -D WHISPER_FFMPEG=yes +cmake --build build +``` + +Run the following example to confirm it's working: + +```bash +# Convert an audio file to Opus format +ffmpeg -i samples/jfk.wav jfk.opus + +# Transcribe the audio file +./build/bin/whisper-cli --model models/ggml-base.en.bin --file jfk.opus ``` ## Docker @@ -459,8 +444,9 @@ WHISPER_MKL=1 make -j We have two Docker images available for this project: -1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`) -2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`) +1. `ghcr.io/ggml-org/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`) +2. `ghcr.io/ggml-org/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`) +3. `ghcr.io/ggml-org/whisper.cpp:main-musa`: Same as `main` but compiled with MUSA support. 
(platforms: `linux/amd64`) ### Usage @@ -473,11 +459,11 @@ docker run -it --rm \ docker run -it --rm \ -v path/to/models:/models \ -v path/to/audios:/audios \ - whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav" + whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav" # transcribe an audio file in samples folder docker run -it --rm \ -v path/to/models:/models \ - whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav" + whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f ./samples/jfk.wav" ``` ## Installing with Conan @@ -494,98 +480,17 @@ For detailed instructions on how to use Conan, please refer to the [Conan docume - Inference only -## Another example - -Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg) -in about half a minute on a MacBook M1 Pro, using `medium.en` model: - -
- Expand to see the result - -```text -$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8 - -whisper_init_from_file: loading model from 'models/ggml-medium.en.bin' -whisper_model_load: loading model -whisper_model_load: n_vocab = 51864 -whisper_model_load: n_audio_ctx = 1500 -whisper_model_load: n_audio_state = 1024 -whisper_model_load: n_audio_head = 16 -whisper_model_load: n_audio_layer = 24 -whisper_model_load: n_text_ctx = 448 -whisper_model_load: n_text_state = 1024 -whisper_model_load: n_text_head = 16 -whisper_model_load: n_text_layer = 24 -whisper_model_load: n_mels = 80 -whisper_model_load: f16 = 1 -whisper_model_load: type = 4 -whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder) -whisper_model_load: kv self size = 42.00 MB -whisper_model_load: kv cross size = 140.62 MB -whisper_model_load: adding 1607 extra tokens -whisper_model_load: model ctx = 1462.35 MB -whisper_model_load: model size = 1462.12 MB - -system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | - -main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ... - - -[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country. -[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia. -[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas. -[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors. -[00:00:29.000 --> 00:00:32.000] On board was a crew of seven. -[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark, -[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon, -[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force. -[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity. -[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine, -[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket -[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth. -[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly, -[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life. -[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more. -[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women -[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief. -[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you, -[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country. -[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue. -[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery -[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on. -[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy. 
-[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope. -[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens -[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one -[00:02:35.000 --> 00:02:39.000] and calls them each by name." -[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing. -[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today. -[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth, -[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home. -[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America. -[00:03:13.000 --> 00:03:19.000] [Silence] - - -whisper_print_timings: fallbacks = 1 p / 0 h -whisper_print_timings: load time = 569.03 ms -whisper_print_timings: mel time = 146.85 ms -whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run) -whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run) -whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run) -whisper_print_timings: total time = 32733.52 ms -``` - -
- ## Real-time audio input example This is a naive example of performing real-time inference on audio from your microphone. The [stream](examples/stream) tool samples the audio every half a second and runs the transcription continuously. -More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10). +More info is available in [issue #10](https://github.com/ggml-org/whisper.cpp/issues/10). +You will need to have [sdl2](https://wiki.libsdl.org/SDL2/Installation) installed for it to work properly. ```bash -make stream -./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000 +cmake -B build -DWHISPER_SDL2=ON +cmake --build build -j --config Release +./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000 ``` https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4 @@ -596,7 +501,7 @@ Adding the `--print-colors` argument will print the transcribed text using an ex to highlight words with high or low confidence: ```bash -./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors +./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors ``` image @@ -606,7 +511,7 @@ to highlight words with high or low confidence: For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`: ```text -$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16 +$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16 whisper_model_load: loading model from './models/ggml-base.en.bin' ... @@ -630,7 +535,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`: ```text -$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1 +$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1 whisper_model_load: loading model from './models/ggml-base.en.bin' ... @@ -668,7 +573,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr ## Speaker segmentation via tinydiarize (experimental) -More information about this approach is available here: https://github.com/ggerganov/whisper.cpp/pull/1058 +More information about this approach is available here: https://github.com/ggml-org/whisper.cpp/pull/1058 Sample usage: @@ -677,7 +582,7 @@ Sample usage: ./models/download-ggml-model.sh small.en-tdrz # run as usual, adding the "-tdrz" command-line argument -./main -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz +./build/bin/whisper-cli -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz ... main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ... ... @@ -694,14 +599,14 @@ main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 pr ## Karaoke-style movie generation (experimental) -The [main](examples/main) example provides support for output of karaoke-style movies, where the -currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script. +The [whisper-cli](examples/cli) example provides support for output of karaoke-style movies, where the +currently pronounced word is highlighted. Use the `-owts` argument and run the generated bash script. This requires to have `ffmpeg` installed. 
Here are a few _"typical"_ examples: ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts source ./samples/jfk.wav.wts ffplay ./samples/jfk.wav.mp4 ``` @@ -711,7 +616,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b --- ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts source ./samples/mm0.wav.wts ffplay ./samples/mm0.wav.mp4 ``` @@ -721,7 +626,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9 --- ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts source ./samples/gb0.wav.wts ffplay ./samples/gb0.wav.mp4 ``` @@ -732,7 +637,7 @@ https://user-images.githubusercontent.com/1991296/199337538-b7b0c7a3-2753-4a88-a ## Video comparison of different models -Use the [scripts/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/scripts/bench-wts.sh) script to generate a video in the following format: +Use the [scripts/bench-wts.sh](https://github.com/ggml-org/whisper.cpp/blob/master/scripts/bench-wts.sh) script to generate a video in the following format: ```bash ./scripts/bench-wts.sh samples/jfk.wav @@ -746,12 +651,12 @@ https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8 ## Benchmarks In order to have an objective comparison of the performance of the inference across different system configurations, -use the [bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it +use the [whisper-bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it took to execute it. The results are summarized in the following Github issue: -[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89) +[Benchmark results](https://github.com/ggml-org/whisper.cpp/issues/89) -Additionally a script to run whisper.cpp with different models and audio files is provided [bench.py](bench.py). +Additionally a script to run whisper.cpp with different models and audio files is provided [bench.py](scripts/bench.py). You can run it with the following command, by default it will run against any standard model in the models folder. @@ -776,31 +681,143 @@ You can download the converted models using the [models/download-ggml-model.sh]( or manually from here: - https://huggingface.co/ggerganov/whisper.cpp -- https://ggml.ggerganov.com For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or [models/README.md](models/README.md). 
-## [Bindings](https://github.com/ggerganov/whisper.cpp/discussions/categories/bindings) +## [Bindings](https://github.com/ggml-org/whisper.cpp/discussions/categories/bindings) -- [x] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggerganov/whisper.cpp/discussions/310) -- [x] JavaScript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggerganov/whisper.cpp/discussions/309) +- [x] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggml-org/whisper.cpp/discussions/310) +- [x] JavaScript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggml-org/whisper.cpp/discussions/309) - React Native (iOS / Android): [whisper.rn](https://github.com/mybigday/whisper.rn) -- [x] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggerganov/whisper.cpp/discussions/312) +- [x] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggml-org/whisper.cpp/discussions/312) - [x] Java: - [GiviMAD/whisper-jni](https://github.com/GiviMAD/whisper-jni) -- [x] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggerganov/whisper.cpp/discussions/507) -- [x] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm) | [#313](https://github.com/ggerganov/whisper.cpp/discussions/313) +- [x] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggml-org/whisper.cpp/discussions/507) +- [x] Objective-C / Swift: [ggml-org/whisper.spm](https://github.com/ggml-org/whisper.spm) | [#313](https://github.com/ggml-org/whisper.cpp/discussions/313) - [exPHAT/SwiftWhisper](https://github.com/exPHAT/SwiftWhisper) -- [x] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422) +- [x] .NET: | [#422](https://github.com/ggml-org/whisper.cpp/discussions/422) - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net) - [NickDarvey/whisper](https://github.com/NickDarvey/whisper) -- [x] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9) +- [x] Python: | [#9](https://github.com/ggml-org/whisper.cpp/issues/9) - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython) - [AIWintermuteAI/whispercpp](https://github.com/AIWintermuteAI/whispercpp) (Updated fork of aarnphm/whispercpp) - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11) + - [abdeladim-s/pywhispercpp](https://github.com/abdeladim-s/pywhispercpp) (Pybind11) - [x] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper) - [x] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity) +## XCFramework +The XCFramework is a precompiled version of the library for iOS, visionOS, tvOS, +and macOS. It can be used in Swift projects without the need to compile the +library from source. For example, the v1.7.5 version of the XCFramework can be +used as follows: + +```swift +// swift-tools-version: 5.10 +// The swift-tools-version declares the minimum version of Swift required to build this package. 
+ +import PackageDescription + +let package = Package( + name: "Whisper", + targets: [ + .executableTarget( + name: "Whisper", + dependencies: [ + "WhisperFramework" + ]), + .binaryTarget( + name: "WhisperFramework", + url: "https://github.com/ggml-org/whisper.cpp/releases/download/v1.7.5/whisper-v1.7.5-xcframework.zip", + checksum: "c7faeb328620d6012e130f3d705c51a6ea6c995605f2df50f6e1ad68c59c6c4a" + ) + ] +) +``` + +## Voice Activity Detection (VAD) +Support for Voice Activity Detection (VAD) can be enabled using the `--vad` +argument to `whisper-cli`. In addition to this option a VAD model is also +required. + +The way this works is that first the audio samples are passed through +the VAD model which will detect speech segments. Using this information the +only the speech segments that are detected are extracted from the original audio +input and passed to whisper for processing. This reduces the amount of audio +data that needs to be processed by whisper and can significantly speed up the +transcription process. + +The following VAD models are currently supported: + +### Silero-VAD +[Silero-vad](https://github.com/snakers4/silero-vad) is a lightweight VAD model +written in Python that is fast and accurate. + +Models can be downloaded by running the following command on Linux or MacOS: +```console +$ ./models/download-vad-model.sh silero-v5.1.2 +Downloading ggml model silero-v5.1.2 from 'https://huggingface.co/ggml-org/whisper-vad' ... +ggml-silero-v5.1.2.bin 100%[==============================================>] 864.35K --.-KB/s in 0.04s +Done! Model 'silero-v5.1.2' saved in '/path/models/ggml-silero-v5.1.2.bin' +You can now use it like this: + + $ ./build/bin/whisper-cli -vm /path/models/ggml-silero-v5.1.2.bin --vad -f samples/jfk.wav -m models/ggml-base.en.bin + +``` +And the following command on Windows: +```console +> .\models\download-vad-model.cmd silero-v5.1.2 +Downloading vad model silero-v5.1.2... +Done! Model silero-v5.1.2 saved in C:\Users\danie\work\ai\whisper.cpp\ggml-silero-v5.1.2.bin +You can now use it like this: + +C:\path\build\bin\Release\whisper-cli.exe -vm C:\path\ggml-silero-v5.1.2.bin --vad -m models/ggml-base.en.bin -f samples\jfk.wav + +``` + +To see a list of all available models, run the above commands without any +arguments. + +This model can be also be converted manually to ggml using the following command: +```console +$ python3 -m venv venv && source venv/bin/activate +$ (venv) pip install silero-vad +$ (venv) $ python models/convert-silero-vad-to-ggml.py --output models/silero.bin +Saving GGML Silero-VAD model to models/silero-v5.1.2-ggml.bin +``` +And it can then be used with whisper as follows: +```console +$ ./build/bin/whisper-cli \ + --file ./samples/jfk.wav \ + --model ./models/ggml-base.en.bin \ + --vad \ + --vad-model ./models/silero-v5.1.2-ggml.bin +``` + +### VAD Options + +* --vad-threshold: Threshold probability for speech detection. A probability +for a speech segment/frame above this threshold will be considered as speech. + +* --vad-min-speech-duration-ms: Minimum speech duration in milliseconds. Speech +segments shorter than this value will be discarded to filter out brief noise or +false positives. + +* --vad-min-silence-duration-ms: Minimum silence duration in milliseconds. Silence +periods must be at least this long to end a speech segment. Shorter silence +periods will be ignored and included as part of the speech. + +* --vad-max-speech-duration-s: Maximum speech duration in seconds. 
Speech segments +longer than this will be automatically split into multiple segments at silence +points exceeding 98ms to prevent excessively long segments. + +* --vad-speech-pad-ms: Speech padding in milliseconds. Adds this amount of padding +before and after each detected speech segment to avoid cutting off speech edges. + +* --vad-samples-overlap: Amount of audio to extend from each speech segment into +the next one, in seconds (e.g., 0.10 = 100ms overlap). This ensures speech isn't +cut off abruptly between segments when they're concatenated together. + ## Examples There are various examples of using the library for different projects in the [examples](examples) folder. @@ -808,25 +825,24 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch | Example | Web | Description | | --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper | -| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine | -| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture | -| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic | -| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess | -| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot | -| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot | +| [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper | +| [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine | +| [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture | +| [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic | +| [whisper-server](examples/server) | | HTTP transcription server with OAI-like API | +| [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot | | [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp | | [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp | | [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp | | [whisper.nvim](examples/whisper.nvim) | | Speech-to-text plugin for Neovim | | [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture | -| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) | +| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggml-org/whisper.cpp/issues/185) | | [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) | -| [server](examples/server) | | HTTP transcription server with OAI-like API | +| 
[wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess | -## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions) +## [Discussions](https://github.com/ggml-org/whisper.cpp/discussions) If you have any kind of feedback about this project feel free to use the Discussions section and open a new topic. -You can use the [Show and tell](https://github.com/ggerganov/whisper.cpp/discussions/categories/show-and-tell) category +You can use the [Show and tell](https://github.com/ggml-org/whisper.cpp/discussions/categories/show-and-tell) category to share your own projects that use `whisper.cpp`. If you have a question, make sure to check the -[Frequently asked questions (#126)](https://github.com/ggerganov/whisper.cpp/discussions/126) discussion. +[Frequently asked questions (#126)](https://github.com/ggml-org/whisper.cpp/discussions/126) discussion. diff --git a/README_sycl.md b/README_sycl.md index 9ea2a7908ab..2d31d284e5a 100644 --- a/README_sycl.md +++ b/README_sycl.md @@ -1,249 +1,249 @@ -# whisper.cpp for SYCL - -[Background](#background) - -[OS](#os) - -[Intel GPU](#intel-gpu) - -[Linux](#linux) - -[Environment Variable](#environment-variable) - -[Known Issue](#known-issue) - -[Todo](#todo) - -## Background - -SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators�such as CPUs, GPUs, and FPGAs. It is a single-source embedded domain-specific language based on pure C++17. - -oneAPI is a specification that is open and standards-based, supporting multiple architecture types including but not limited to GPU, CPU, and FPGA. The spec has both direct programming and API-based programming paradigms. - -Intel uses the SYCL as direct programming language to support CPU, GPUs and FPGAs. - -To avoid re-inventing the wheel, this code refers other code paths in llama.cpp (like OpenBLAS, cuBLAS, CLBlast). We use a open-source tool [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) (Commercial release [Intel� DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) migrate to SYCL. - -The whisper.cpp for SYCL is used to support Intel GPUs. - -For Intel CPU, recommend to use whisper.cpp for X86 (Intel MKL build). - -## OS - -|OS|Status|Verified| -|-|-|-| -|Linux|Support|Ubuntu 22.04| -|Windows|Ongoing| | - - -## Intel GPU - -|Intel GPU| Status | Verified Model| -|-|-|-| -|Intel Data Center Max Series| Support| Max 1550| -|Intel Data Center Flex Series| Support| Flex 170| -|Intel Arc Series| Support| Arc 770| -|Intel built-in Arc GPU| Support| built-in Arc GPU in Meteor Lake| -|Intel iGPU| Support| iGPU in i5-1250P, i7-1165G7| - - -## Linux - -### Setup Environment - -1. Install Intel GPU driver. - -a. Please install Intel GPU driver by official guide: [Install GPU Drivers](https://dgpu-docs.intel.com/driver/installation.html). - -Note: for iGPU, please install the client GPU driver. - -b. Add user to group: video, render. - -``` -sudo usermod -aG render username -sudo usermod -aG video username -``` - -Note: re-login to enable it. - -c. Check - -``` -sudo apt install clinfo -sudo clinfo -l -``` - -Output (example): - -``` -Platform #0: Intel(R) OpenCL Graphics - `-- Device #0: Intel(R) Arc(TM) A770 Graphics - - -Platform #0: Intel(R) OpenCL HD Graphics - `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49] -``` - -2. Install Intel� oneAPI Base toolkit. - - -a. 
Please follow the procedure in [Get the Intel� oneAPI Base Toolkit ](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html). - -Recommend to install to default folder: **/opt/intel/oneapi**. - -Following guide use the default folder as example. If you use other folder, please modify the following guide info with your folder. - -b. Check - -``` -source /opt/intel/oneapi/setvars.sh - -sycl-ls -``` - -There should be one or more level-zero devices. Like **[ext_oneapi_level_zero:gpu:0]**. - -Output (example): -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] -[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] -[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50] -[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918] - -``` - -2. Build locally: - -``` -mkdir -p build -cd build -source /opt/intel/oneapi/setvars.sh - -#for FP16 -#cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DWHISPER_SYCL_F16=ON - -#for FP32 -cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx - -#build example/main only -#cmake --build . --config Release --target main - -#build all binary -cmake --build . --config Release -v - -``` - -or - -``` -./examples/sycl/build.sh -``` - -Note: - -- By default, it will build for all binary files. It will take more time. To reduce the time, we recommend to build for **example/main** only. - -### Run - -1. Put model file to folder **models** - -2. Enable oneAPI running environment - -``` -source /opt/intel/oneapi/setvars.sh -``` - -3. List device ID - -Run without parameter: - -``` -./build/bin/ls-sycl-device - -or - -./build/bin/main -``` - -Check the ID in startup log, like: - -``` -found 4 SYCL devices: - Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3, - max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 - Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2, - max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280 - Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0, - max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280 - Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0, - max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 - -``` - -|Attribute|Note| -|-|-| -|compute capability 1.3|Level-zero running time, recommended | -|compute capability 3.0|OpenCL running time, slower than level-zero in most cases| - -4. Set device ID and execute whisper.cpp - -Set device ID = 0 by **GGML_SYCL_DEVICE=0** - -``` -GGML_SYCL_DEVICE=0 ./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav -``` -or run by script: - -``` -./examples/sycl/run_whisper.sh -``` - - - -5. Check the device ID in output - -Like: -``` -Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device -``` - - -## Environment Variable - -#### Build - -|Name|Value|Function| -|-|-|-| -|WHISPER_SYCL|ON (mandatory)|Enable build with SYCL code path.
For FP32/FP16, WHISPER_SYCL=ON is mandatory.|
-|WHISPER_SYCL_F16|ON (optional)|Enable FP16 build with SYCL code path.For FP32, do not set it.|
-|CMAKE_C_COMPILER|icx|Use icx compiler for SYCL code path|
-|CMAKE_CXX_COMPILER|icpx|use icpx for SYCL code path|
-
-#### Running
-
-
-|Name|Value|Function|
-|-|-|-|
-|GGML_SYCL_DEVICE|0 (default) or 1|Set the device id used. Check the device ids by default running output|
-|GGML_SYCL_DEBUG|0 (default) or 1|Enable log function by macro: GGML_SYCL_DEBUG|
-
-## Known Issue
-
-- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.
-
-  Miss to enable oneAPI running environment.
-
-  Install oneAPI base toolkit and enable it by: `source /opt/intel/oneapi/setvars.sh`.
-
-
-- Hang during startup
-
-  llama.cpp use mmap as default way to read model file and copy to GPU. In some system, memcpy will be abnormal and block.
-
-  Solution: add **--no-mmap**.
-
-## Todo
-
-- Support to build in Windows.
-
-- Support multiple cards.
\ No newline at end of file
+# whisper.cpp for SYCL
+
+[Background](#background)
+
+[OS](#os)
+
+[Intel GPU](#intel-gpu)
+
+[Linux](#linux)
+
+[Environment Variable](#environment-variable)
+
+[Known Issue](#known-issue)
+
+[Todo](#todo)
+
+## Background
+
+SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators—such as CPUs, GPUs, and FPGAs. It is a single-source embedded domain-specific language based on pure C++17.
+
+oneAPI is a specification that is open and standards-based, supporting multiple architecture types including but not limited to GPU, CPU, and FPGA. The spec has both direct programming and API-based programming paradigms.
+
+Intel uses SYCL as a direct programming language to support CPUs, GPUs and FPGAs.
+
+To avoid re-inventing the wheel, this code reuses other code paths in llama.cpp (like OpenBLAS, cuBLAS, CLBlast). We used the open-source tool [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) (commercial release: [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) to migrate the code to SYCL.
+
+whisper.cpp for SYCL supports Intel GPUs.
+
+For Intel CPUs, we recommend the x86 build of whisper.cpp (Intel MKL build).
+
+## OS
+
+|OS|Status|Verified|
+|-|-|-|
+|Linux|Support|Ubuntu 22.04|
+|Windows|Ongoing| |
+
+
+## Intel GPU
+
+|Intel GPU| Status | Verified Model|
+|-|-|-|
+|Intel Data Center Max Series| Support| Max 1550|
+|Intel Data Center Flex Series| Support| Flex 170|
+|Intel Arc Series| Support| Arc 770|
+|Intel built-in Arc GPU| Support| built-in Arc GPU in Meteor Lake|
+|Intel iGPU| Support| iGPU in i5-1250P, i7-1165G7|
+
+
+## Linux
+
+### Setup Environment
+
+1. Install the Intel GPU driver.
+
+a. Install the Intel GPU driver following the official guide: [Install GPU Drivers](https://dgpu-docs.intel.com/driver/installation.html).
+
+Note: for iGPUs, install the client GPU driver.
+
+b. Add your user to the render and video groups:
+
+```
+sudo usermod -aG render username
+sudo usermod -aG video username
+```
+
+Note: log out and back in for this to take effect.
+
+c. Check:
+
+```
+sudo apt install clinfo
+sudo clinfo -l
+```
+
+Output (example):
+
+```
+Platform #0: Intel(R) OpenCL Graphics
+ `-- Device #0: Intel(R) Arc(TM) A770 Graphics
+
+
+Platform #0: Intel(R) OpenCL HD Graphics
+ `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49]
+```
+
+2. Install the Intel® oneAPI Base Toolkit.
+
+
+a. 
Follow the procedure in [Get the Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html).
+
+We recommend installing to the default folder: **/opt/intel/oneapi**.
+
+The following guide uses the default folder as an example. If you installed to a different folder, adjust the paths below accordingly.
+
+b. Check:
+
+```
+source /opt/intel/oneapi/setvars.sh
+
+sycl-ls
+```
+
+There should be one or more Level-Zero devices, such as **[ext_oneapi_level_zero:gpu:0]**.
+
+Output (example):
+```
+[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
+[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000]
+[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50]
+[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918]
+
+```
+
+3. Build locally:
+
+```
+mkdir -p build
+cd build
+source /opt/intel/oneapi/setvars.sh
+
+#for FP16
+#cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DWHISPER_SYCL_F16=ON
+
+#for FP32
+cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+
+#build example/main only
+#cmake --build . --config Release --target main
+
+#build all binaries
+cmake --build . --config Release -v
+
+```
+
+or
+
+```
+./examples/sycl/build.sh
+```
+
+Note:
+
+- By default, all binaries are built, which takes longer. To reduce build time, we recommend building only **example/main**.
+
+### Run
+
+1. Put the model file in the **models** folder.
+
+2. Enable the oneAPI running environment:
+
+```
+source /opt/intel/oneapi/setvars.sh
+```
+
+3. List the device IDs.
+
+Run without parameters:
+
+```
+./build/bin/ls-sycl-device
+
+or
+
+./build/bin/main
+```
+
+Check the IDs in the startup log, for example:
+
+```
+found 4 SYCL devices:
+  Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
+    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
+  Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
+    max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
+  Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
+    max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
+  Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
+    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
+
+```
+
+|Attribute|Note|
+|-|-|
+|compute capability 1.3|Level-Zero runtime, recommended|
+|compute capability 3.0|OpenCL runtime, slower than Level-Zero in most cases|
+
+4. Set the device ID and execute whisper.cpp.
+
+Set device ID = 0 via **GGML_SYCL_DEVICE=0**:
+
+```
+GGML_SYCL_DEVICE=0 ./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav
+```
+or run via the script:
+
+```
+./examples/sycl/run_whisper.sh
+```
+
+
+
+5. Check the device ID in the output, for example:
+
+```
+Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
+```
+
+
+## Environment Variable
+
+#### Build
+
+|Name|Value|Function|
+|-|-|-|
+|WHISPER_SYCL|ON (mandatory)|Enable build with SYCL code path.
For FP32/FP16, WHISPER_SYCL=ON is mandatory.|
+|WHISPER_SYCL_F16|ON (optional)|Enable FP16 build with SYCL code path. For FP32, do not set it.|
+|CMAKE_C_COMPILER|icx|Use the icx compiler for the SYCL code path|
+|CMAKE_CXX_COMPILER|icpx|Use the icpx compiler for the SYCL code path|
+
+#### Running
+
+
+|Name|Value|Function|
+|-|-|-|
+|GGML_SYCL_DEVICE|0 (default) or 1|Set the device ID to use. Check the available device IDs in the default run output|
+|GGML_SYCL_DEBUG|0 (default) or 1|Enable logging via the GGML_SYCL_DEBUG macro|
+
+## Known Issue
+
+- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.
+
+  The oneAPI running environment has not been enabled.
+
+  Install the oneAPI Base Toolkit and enable it with: `source /opt/intel/oneapi/setvars.sh`.
+
+
+- Hang during startup
+
+  llama.cpp uses mmap as the default way to read the model file and copy it to the GPU. On some systems, the memcpy can misbehave and block.
+
+  Solution: add **--no-mmap**.
+
+## Todo
+
+- Support building on Windows.
+
+- Support multiple cards.
diff --git a/bindings/go/Makefile b/bindings/go/Makefile
index 4f3a416a3ff..e4436a6a291 100644
--- a/bindings/go/Makefile
+++ b/bindings/go/Makefile
@@ -11,21 +11,29 @@ UNAME_M := $(shell uname -m)
 endif
 
 GGML_METAL_PATH_RESOURCES := $(abspath ../..)
-BUILD_DIR := build
+BUILD_DIR := build_go
 MODELS_DIR := models
 EXAMPLES_DIR := $(wildcard examples/*)
-INCLUDE_PATH := $(abspath ../..)
-LIBRARY_PATH := $(abspath ../..)
+INCLUDE_PATH := $(abspath ../../include):$(abspath ../../ggml/include)
+LIBRARY_PATH := $(abspath ../../${BUILD_DIR}/src):$(abspath ../../${BUILD_DIR}/ggml/src)
+
+ifeq ($(GGML_CUDA),1)
+	LIBRARY_PATH := $(LIBRARY_PATH):$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib/
+	BUILD_FLAGS := -ldflags "-extldflags '-lcudart -lcuda -lcublas'"
+endif
 
 ifeq ($(UNAME_S),Darwin)
-	EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit
+	LIBRARY_PATH := $(LIBRARY_PATH):$(abspath ../../${BUILD_DIR}/ggml/src/ggml-blas):$(abspath ../../${BUILD_DIR}/ggml/src/ggml-metal)
+	EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit -lggml-metal -lggml-blas
endif

all: clean whisper examples

whisper: mkdir
-	@echo Build whisper
-	@${MAKE} -C ../.. libwhisper.a
+	cmake -S ../.. -B ../../${BUILD_DIR} \
+		-DCMAKE_BUILD_TYPE=Release \
+		-DBUILD_SHARED_LIBS=OFF
+	cmake --build ../../${BUILD_DIR} --target whisper

test: model-small whisper modtidy
ifeq ($(UNAME_S),Darwin)
diff --git a/bindings/go/README.md b/bindings/go/README.md
index 1968cfd2470..9d832096512 100644
--- a/bindings/go/README.md
+++ b/bindings/go/README.md
@@ -31,7 +31,7 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
-	if err := context.Process(samples, nil, nil); err != nil {
+	if err := context.Process(samples, nil, nil, nil); err != nil {
 		return err
 	}
 
@@ -51,7 +51,7 @@ func main()
In order to build, you need to have the Go compiler installed. You can get it from [here](https://golang.org/dl/). Run the tests with:

```bash
-git clone https://github.com/ggerganov/whisper.cpp.git
+git clone https://github.com/ggml-org/whisper.cpp.git
cd whisper.cpp/bindings/go
make test
```

@@ -62,6 +62,12 @@ This will compile a static `libwhisper.a` in a `build` folder, download a model
make examples
```

+To build with CUDA support, add `GGML_CUDA=1`:
+
+```bash
+GGML_CUDA=1 make examples
+```
+
The examples are placed in the `build` directory. 
Once built, you can download all the models with the following command: ```bash @@ -92,7 +98,7 @@ The API Documentation: Getting help: - * Follow the discussion for the go bindings [here](https://github.com/ggerganov/whisper.cpp/discussions/312) + * Follow the discussion for the go bindings [here](https://github.com/ggml-org/whisper.cpp/discussions/312) ## License diff --git a/bindings/go/doc.go b/bindings/go/doc.go index dcc351f2732..a5dae9314b0 100644 --- a/bindings/go/doc.go +++ b/bindings/go/doc.go @@ -1,5 +1,5 @@ /* -github.com/ggerganov/whisper.cpp/bindings/go +github.com/ggml-org/whisper.cpp/bindings/go provides a speech-to-text service bindings for the Go programming language. */ package whisper diff --git a/bindings/go/examples/go-model-download/context.go b/bindings/go/examples/go-model-download/context.go index 639d8f5bd96..7d5f0ddb1df 100644 --- a/bindings/go/examples/go-model-download/context.go +++ b/bindings/go/examples/go-model-download/context.go @@ -9,22 +9,23 @@ import ( // ContextForSignal returns a context object which is cancelled when a signal // is received. It returns nil if no signal parameter is provided func ContextForSignal(signals ...os.Signal) context.Context { - if len(signals) == 0 { - return nil - } + if len(signals) == 0 { + return nil + } - ch := make(chan os.Signal) - ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan os.Signal, 1) // Buffered channel with space for 1 signal + ctx, cancel := context.WithCancel(context.Background()) - // Send message on channel when signal received - signal.Notify(ch, signals...) + // Send message on channel when signal received + signal.Notify(ch, signals...) - // When any signal received, call cancel - go func() { - <-ch - cancel() - }() + // When any signal is received, call cancel + go func() { + <-ch + cancel() + }() - // Return success - return ctx + // Return success + return ctx } + diff --git a/bindings/go/examples/go-model-download/main.go b/bindings/go/examples/go-model-download/main.go index 3522d881717..728c6df53d4 100644 --- a/bindings/go/examples/go-model-download/main.go +++ b/bindings/go/examples/go-model-download/main.go @@ -9,6 +9,7 @@ import ( "net/url" "os" "path/filepath" + "strings" "syscall" "time" ) @@ -17,14 +18,27 @@ import ( // CONSTANTS const ( - srcUrl = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main" // The location of the models - srcExt = ".bin" // Filename extension - bufSize = 1024 * 64 // Size of the buffer used for downloading the model + srcUrl = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/" // The location of the models + srcExt = ".bin" // Filename extension + bufSize = 1024 * 64 // Size of the buffer used for downloading the model ) var ( // The models which will be downloaded, if no model is specified as an argument - modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3"} + modelNames = []string{ + "tiny", "tiny-q5_1", "tiny-q8_0", + "tiny.en", "tiny.en-q5_1", "tiny.en-q8_0", + "base", "base-q5_1", "base-q8_0", + "base.en", "base.en-q5_1", "base.en-q8_0", + "small", "small-q5_1", "small-q8_0", + "small.en", "small.en-q5_1", "small.en-q8_0", + "medium", "medium-q5_0", "medium-q8_0", + "medium.en", "medium.en-q5_0", "medium.en-q8_0", + "large-v1", + "large-v2", "large-v2-q5_0", "large-v2-q8_0", + "large-v3", "large-v3-q5_0", + "large-v3-turbo", "large-v3-turbo-q5_0", 
"large-v3-turbo-q8_0", + } ) var ( @@ -44,7 +58,25 @@ var ( func main() { flag.Usage = func() { name := filepath.Base(flag.CommandLine.Name()) - fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [options] \n\n", name) + fmt.Fprintf(flag.CommandLine.Output(), ` + Usage: %s [options] [...] + + Options: + -out string Specify the output folder where models will be saved. + Default: Current working directory. + -timeout duration Set the maximum duration for downloading a model. + Example: 10m, 1h (default: 30m0s). + -quiet Suppress all output except errors. + + Examples: + 1. Download a specific model: + %s -out ./models tiny-q8_0 + + 2. Download all models: + %s -out ./models + + `, name, name, name) + flag.PrintDefaults() } flag.Parse() @@ -114,23 +146,87 @@ func GetOut() (string, error) { // GetModels returns the list of models to download func GetModels() []string { if flag.NArg() == 0 { - return modelNames - } else { - return flag.Args() + fmt.Println("No model specified.") + fmt.Println("Preparing to download all models...") + + // Calculate total download size + fmt.Println("Calculating total download size...") + totalSize, err := CalculateTotalDownloadSize(modelNames) + if err != nil { + fmt.Println("Error calculating download sizes:", err) + os.Exit(1) + } + + fmt.Println("View available models: https://huggingface.co/ggerganov/whisper.cpp/tree/main") + fmt.Printf("Total download size: %.2f GB\n", float64(totalSize)/(1024*1024*1024)) + fmt.Println("Would you like to download all models? (y/N)") + + // Prompt for user input + var response string + fmt.Scanln(&response) + if response != "y" && response != "Y" { + fmt.Println("Aborting. Specify a model to download.") + os.Exit(0) + } + + return modelNames // Return all models if confirmed } + return flag.Args() // Return specific models if arguments are provided +} + +func CalculateTotalDownloadSize(models []string) (int64, error) { + var totalSize int64 + client := http.Client{} + + for _, model := range models { + modelURL, err := URLForModel(model) + if err != nil { + return 0, err + } + + // Issue a HEAD request to get the file size + req, err := http.NewRequest("HEAD", modelURL, nil) + if err != nil { + return 0, err + } + + resp, err := client.Do(req) + if err != nil { + return 0, err + } + resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + fmt.Printf("Warning: Unable to fetch size for %s (HTTP %d)\n", model, resp.StatusCode) + continue + } + + size := resp.ContentLength + totalSize += size + } + return totalSize, nil } // URLForModel returns the URL for the given model on huggingface.co func URLForModel(model string) (string, error) { + // Ensure "ggml-" prefix is added only once + if !strings.HasPrefix(model, "ggml-") { + model = "ggml-" + model + } + + // Ensure ".bin" extension is added only once if filepath.Ext(model) != srcExt { model += srcExt } + + // Parse the base URL url, err := url.Parse(srcUrl) if err != nil { return "", err - } else { - url.Path = filepath.Join(url.Path, model) } + + // Ensure no trailing slash in the base URL + url.Path = fmt.Sprintf("%s/%s", strings.TrimSuffix(url.Path, "/"), model) return url.String(), nil } diff --git a/bindings/go/examples/go-whisper/process.go b/bindings/go/examples/go-whisper/process.go index 71e52f01000..833947e843c 100644 --- a/bindings/go/examples/go-whisper/process.go +++ b/bindings/go/examples/go-whisper/process.go @@ -67,7 +67,7 @@ func Process(model whisper.Model, path string, flags *Flags) error { // Process the data fmt.Fprintf(flags.Output(), " ...processing 
%q\n", path) context.ResetTimings() - if err := context.Process(data, cb, nil); err != nil { + if err := context.Process(data, nil, cb, nil); err != nil { return err } diff --git a/bindings/go/go.mod b/bindings/go/go.mod index 594f184baae..7c92c7b4890 100644 --- a/bindings/go/go.mod +++ b/bindings/go/go.mod @@ -1,10 +1,10 @@ module github.com/ggerganov/whisper.cpp/bindings/go -go 1.19 +go 1.23 require ( github.com/go-audio/wav v1.1.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.9.0 ) require ( diff --git a/bindings/go/go.sum b/bindings/go/go.sum index 870ebdc3c57..1f99aebdd90 100644 --- a/bindings/go/go.sum +++ b/bindings/go/go.sum @@ -1,4 +1,3 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4= @@ -9,15 +8,9 @@ github.com/go-audio/wav v1.1.0 h1:jQgLtbqBzY7G+BM8fXF7AHUk1uHUviWS4X39d5rsL2g= github.com/go-audio/wav v1.1.0/go.mod h1:mpe9qfwbScEbkd8uybLuIpTgHyrISw/OTuvjUW2iGtE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/bindings/go/params.go b/bindings/go/params.go index 4b4da032d62..d8dee57e331 100644 --- a/bindings/go/params.go +++ b/bindings/go/params.go @@ -47,6 +47,7 @@ func (p *Params) SetPrintTimestamps(v bool) { p.print_timestamps = toBool(v) } + // Set language id func (p *Params) SetLanguage(lang int) error { if lang == -1 { @@ -119,11 +120,37 @@ func (p *Params) SetAudioCtx(n int) { p.audio_ctx = C.int(n) } +func (p *Params) SetMaxContext(n int) { + p.n_max_text_ctx = C.int(n) +} + +func (p *Params) SetBeamSize(n int) { + p.beam_search.beam_size = C.int(n) +} + +func (p *Params) SetEntropyThold(t float32) { + p.entropy_thold = C.float(t) +} + +func (p *Params) SetTemperature(t float32) { + p.temperature = C.float(t) +} + +// Sets the fallback temperature incrementation +// Pass -1.0 to disable this feature +func (p *Params) SetTemperatureFallback(t float32) { + p.temperature_inc = 
C.float(t) +} + // Set initial prompt func (p *Params) SetInitialPrompt(prompt string) { p.initial_prompt = C.CString(prompt) } +func (p *Params) SetCarryInitialPrompt(v bool) { + p.carry_initial_prompt = toBool(v) +} + /////////////////////////////////////////////////////////////////////////////// // PRIVATE METHODS @@ -149,6 +176,10 @@ func (p *Params) String() string { str += fmt.Sprintf(" duration_ms=%d", p.duration_ms) str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx) str += fmt.Sprintf(" initial_prompt=%s", C.GoString(p.initial_prompt)) + str += fmt.Sprintf(" entropy_thold=%f", p.entropy_thold) + str += fmt.Sprintf(" temperature=%f", p.temperature) + str += fmt.Sprintf(" temperature_inc=%f", p.temperature_inc) + str += fmt.Sprintf(" beam_size=%d", p.beam_search.beam_size) if p.translate { str += " translate" } @@ -173,6 +204,9 @@ func (p *Params) String() string { if p.token_timestamps { str += " token_timestamps" } + if p.carry_initial_prompt { + str += " carry_initial_prompt" + } return str + ">" } diff --git a/bindings/go/pkg/whisper/context.go b/bindings/go/pkg/whisper/context.go index ead92648f3e..cb3d9eb8c1c 100644 --- a/bindings/go/pkg/whisper/context.go +++ b/bindings/go/pkg/whisper/context.go @@ -71,6 +71,10 @@ func (context *context) Language() string { return whisper.Whisper_lang_str(context.params.Language()) } +func (context *context) DetectedLanguage() string { + return whisper.Whisper_lang_str(context.model.ctx.Whisper_full_lang_id()) +} + // Set translate flag func (context *context) SetTranslate(v bool) { context.params.SetTranslate(v) @@ -125,6 +129,32 @@ func (context *context) SetAudioCtx(n uint) { context.params.SetAudioCtx(int(n)) } +// Set maximum number of text context tokens to store +func (context *context) SetMaxContext(n int) { + context.params.SetMaxContext(n) +} + +// Set Beam Size +func (context *context) SetBeamSize(n int) { + context.params.SetBeamSize(n) +} + +// Set Entropy threshold +func (context *context) SetEntropyThold(t float32) { + context.params.SetEntropyThold(t) +} + +// Set Temperature +func (context *context) SetTemperature(t float32) { + context.params.SetTemperature(t) +} + +// Set the fallback temperature incrementation +// Pass -1.0 to disable this feature +func (context *context) SetTemperatureFallback(t float32) { + context.params.SetTemperatureFallback(t) +} + // Set initial prompt func (context *context) SetInitialPrompt(prompt string) { context.params.SetInitialPrompt(prompt) @@ -163,6 +193,7 @@ func (context *context) WhisperLangAutoDetect(offset_ms int, n_threads int) ([]f // Process new sample data and return any errors func (context *context) Process( data []float32, + callEncoderBegin EncoderBeginCallback, callNewSegment SegmentCallback, callProgress ProgressCallback, ) error { @@ -177,7 +208,20 @@ func (context *context) Process( // We don't do parallel processing at the moment processors := 0 if processors > 1 { - if err := context.model.ctx.Whisper_full_parallel(context.params, data, processors, nil, func(new int) { + if err := context.model.ctx.Whisper_full_parallel(context.params, data, processors, callEncoderBegin, + func(new int) { + if callNewSegment != nil { + num_segments := context.model.ctx.Whisper_full_n_segments() + s0 := num_segments - new + for i := s0; i < num_segments; i++ { + callNewSegment(toSegment(context.model.ctx, i)) + } + } + }); err != nil { + return err + } + } else if err := context.model.ctx.Whisper_full(context.params, data, callEncoderBegin, + func(new int) { if callNewSegment != nil { 
num_segments := context.model.ctx.Whisper_full_n_segments() s0 := num_segments - new @@ -185,22 +229,11 @@ func (context *context) Process( callNewSegment(toSegment(context.model.ctx, i)) } } - }); err != nil { - return err - } - } else if err := context.model.ctx.Whisper_full(context.params, data, nil, func(new int) { - if callNewSegment != nil { - num_segments := context.model.ctx.Whisper_full_n_segments() - s0 := num_segments - new - for i := s0; i < num_segments; i++ { - callNewSegment(toSegment(context.model.ctx, i)) + }, func(progress int) { + if callProgress != nil { + callProgress(progress) } - } - }, func(progress int) { - if callProgress != nil { - callProgress(progress) - } - }); err != nil { + }); err != nil { return err } diff --git a/bindings/go/pkg/whisper/context_test.go b/bindings/go/pkg/whisper/context_test.go index c8c6016e934..e98a4c2b80b 100644 --- a/bindings/go/pkg/whisper/context_test.go +++ b/bindings/go/pkg/whisper/context_test.go @@ -4,52 +4,121 @@ import ( "os" "testing" - // Packages - whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" + "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" + "github.com/go-audio/wav" assert "github.com/stretchr/testify/assert" ) -const ( - ModelPath = "../../models/ggml-tiny.bin" - SamplePath = "../../samples/jfk.wav" -) +func TestSetLanguage(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + context, err := model.NewContext() + assert.NoError(err) + + // This returns an error since + // the model 'models/ggml-small.en.bin' + // that is loaded is not multilingual + err = context.SetLanguage("en") + assert.Error(err) +} + +func TestContextModelIsMultilingual(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + context, err := model.NewContext() + assert.NoError(err) + + isMultilingual := context.IsMultilingual() + + // This returns false since + // the model 'models/ggml-small.en.bin' + // that is loaded is not multilingual + assert.False(isMultilingual) +} + +func TestLanguage(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() -func Test_Whisper_000(t *testing.T) { + context, err := model.NewContext() + assert.NoError(err) + + // This always returns en since + // the model 'models/ggml-small.en.bin' + // that is loaded is not multilingual + expectedLanguage := "en" + actualLanguage := context.Language() + assert.Equal(expectedLanguage, actualLanguage) +} + +func TestProcess(t *testing.T) { assert := assert.New(t) - if _, err := os.Stat(ModelPath); os.IsNotExist(err) { - t.Skip("Skipping test, model not found:", ModelPath) - } - if _, err := os.Stat(SamplePath); os.IsNotExist(err) { - t.Skip("Skipping test, sample not found:", SamplePath) - } - - // Load model + + fh, err := os.Open(SamplePath) + assert.NoError(err) + defer fh.Close() + + // Decode the WAV file - load the full buffer + dec := wav.NewDecoder(fh) + buf, err := dec.FullPCMBuffer() + assert.NoError(err) + assert.Equal(uint16(1), dec.NumChans) + + data := buf.AsFloat32Buffer().Data + model, err := whisper.New(ModelPath) assert.NoError(err) assert.NotNil(model) - assert.NoError(model.Close()) + defer model.Close() + + context, err := model.NewContext() + assert.NoError(err) - t.Log("languages=", model.Languages()) + err = context.Process(data, nil, 
nil, nil) + assert.NoError(err) } -func Test_Whisper_001(t *testing.T) { +func TestDetectedLanguage(t *testing.T) { assert := assert.New(t) - if _, err := os.Stat(ModelPath); os.IsNotExist(err) { - t.Skip("Skipping test, model not found:", ModelPath) - } - if _, err := os.Stat(SamplePath); os.IsNotExist(err) { - t.Skip("Skipping test, sample not found:", SamplePath) - } - - // Load model + + fh, err := os.Open(SamplePath) + assert.NoError(err) + defer fh.Close() + + // Decode the WAV file - load the full buffer + dec := wav.NewDecoder(fh) + buf, err := dec.FullPCMBuffer() + assert.NoError(err) + assert.Equal(uint16(1), dec.NumChans) + + data := buf.AsFloat32Buffer().Data + model, err := whisper.New(ModelPath) assert.NoError(err) assert.NotNil(model) defer model.Close() - // Get context for decoding - ctx, err := model.NewContext() + context, err := model.NewContext() + assert.NoError(err) + + err = context.Process(data, nil, nil, nil) assert.NoError(err) - assert.NotNil(ctx) + expectedLanguage := "en" + actualLanguage := context.DetectedLanguage() + assert.Equal(expectedLanguage, actualLanguage) } diff --git a/bindings/go/pkg/whisper/interface.go b/bindings/go/pkg/whisper/interface.go index b430e7ce853..e3122c44b76 100644 --- a/bindings/go/pkg/whisper/interface.go +++ b/bindings/go/pkg/whisper/interface.go @@ -16,6 +16,10 @@ type SegmentCallback func(Segment) // processing. It is called during the Process function type ProgressCallback func(int) +// EncoderBeginCallback is the callback function for checking if we want to +// continue processing. It is called during the Process function +type EncoderBeginCallback func() bool + // Model is the interface to a whisper model. Create a new model with the // function whisper.New(string) type Model interface { @@ -31,29 +35,35 @@ type Model interface { Languages() []string } -// Context is the speach recognition context. +// Context is the speech recognition context. type Context interface { SetLanguage(string) error // Set the language to use for speech recognition, use "auto" for auto detect language. SetTranslate(bool) // Set translate flag IsMultilingual() bool // Return true if the model is multilingual. 
Language() string // Get language - - SetOffset(time.Duration) // Set offset - SetDuration(time.Duration) // Set duration - SetThreads(uint) // Set number of threads to use - SetSplitOnWord(bool) // Set split on word flag - SetTokenThreshold(float32) // Set timestamp token probability threshold - SetTokenSumThreshold(float32) // Set timestamp token sum probability threshold - SetMaxSegmentLength(uint) // Set max segment length in characters - SetTokenTimestamps(bool) // Set token timestamps flag - SetMaxTokensPerSegment(uint) // Set max tokens per segment (0 = no limit) - SetAudioCtx(uint) // Set audio encoder context - SetInitialPrompt(prompt string) // Set initial prompt + DetectedLanguage() string // Get detected language + + SetOffset(time.Duration) // Set offset + SetDuration(time.Duration) // Set duration + SetThreads(uint) // Set number of threads to use + SetSplitOnWord(bool) // Set split on word flag + SetTokenThreshold(float32) // Set timestamp token probability threshold + SetTokenSumThreshold(float32) // Set timestamp token sum probability threshold + SetMaxSegmentLength(uint) // Set max segment length in characters + SetTokenTimestamps(bool) // Set token timestamps flag + SetMaxTokensPerSegment(uint) // Set max tokens per segment (0 = no limit) + SetAudioCtx(uint) // Set audio encoder context + SetMaxContext(n int) // Set maximum number of text context tokens to store + SetBeamSize(n int) // Set Beam Size + SetEntropyThold(t float32) // Set Entropy threshold + SetInitialPrompt(prompt string) // Set initial prompt + SetTemperature(t float32) // Set temperature + SetTemperatureFallback(t float32) // Set temperature incrementation // Process mono audio data and return any errors. // If defined, newly generated segments are passed to the // callback function during processing. - Process([]float32, SegmentCallback, ProgressCallback) error + Process([]float32, EncoderBeginCallback, SegmentCallback, ProgressCallback) error // After process is called, return segments until the end of the stream // is reached, when io.EOF is returned. 
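
For orientation, here is a minimal sketch of driving the updated four-argument `Process` from user code. It is illustrative only: the model and sample paths are placeholders, and the audio is assumed to already be 16 kHz mono so it can be fed to whisper directly.

```go
package main

import (
	"fmt"
	"os"

	wav "github.com/go-audio/wav"
	whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
)

func main() {
	// Load the model and create a decoding context (placeholder path).
	model, err := whisper.New("models/ggml-base.en.bin")
	if err != nil {
		panic(err)
	}
	defer model.Close()

	context, err := model.NewContext()
	if err != nil {
		panic(err)
	}

	// Decode a 16 kHz mono WAV file into float32 samples (placeholder path).
	fh, err := os.Open("samples/jfk.wav")
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	buf, err := wav.NewDecoder(fh).FullPCMBuffer()
	if err != nil {
		panic(err)
	}
	samples := buf.AsFloat32Buffer().Data

	// All three callbacks are optional; pass nil to disable any of them.
	encoderBegin := func() bool { return true }                     // return false to abort before encoding
	onSegment := func(s whisper.Segment) { fmt.Println(s.Text) }    // invoked for each newly decoded segment
	onProgress := func(p int) { fmt.Printf("progress: %d%%\n", p) } // invoked with the current progress

	if err := context.Process(samples, encoderBegin, onSegment, onProgress); err != nil {
		panic(err)
	}
}
```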
diff --git a/bindings/go/pkg/whisper/model_test.go b/bindings/go/pkg/whisper/model_test.go new file mode 100644 index 00000000000..8797f0d0fd0 --- /dev/null +++ b/bindings/go/pkg/whisper/model_test.go @@ -0,0 +1,91 @@ +package whisper_test + +import ( + "testing" + + "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" + assert "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert := assert.New(t) + t.Run("valid model path", func(t *testing.T) { + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + }) + + t.Run("invalid model path", func(t *testing.T) { + invalidModelPath := "invalid-model-path.bin" + model, err := whisper.New(invalidModelPath) + assert.Error(err) + assert.Nil(model) + }) +} + +func TestClose(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + + err = model.Close() + assert.NoError(err) +} + +func TestNewContext(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + context, err := model.NewContext() + assert.NoError(err) + assert.NotNil(context) +} + +func TestIsMultilingual(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + isMultilingual := model.IsMultilingual() + + // This returns false since + // the model 'models/ggml-small.en.bin' + // that is loaded is not multilingual + assert.False(isMultilingual) +} + +func TestLanguages(t *testing.T) { + assert := assert.New(t) + + model, err := whisper.New(ModelPath) + assert.NoError(err) + assert.NotNil(model) + defer model.Close() + + expectedLanguages := []string{ + "en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl", + "ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk", + "el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr", + "bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn", + "sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne", + "mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn", + "yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi", + "lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my", + "bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su", + } + + actualLanguages := model.Languages() + + assert.Equal(expectedLanguages, actualLanguages) +} diff --git a/bindings/go/pkg/whisper/util_test.go b/bindings/go/pkg/whisper/util_test.go new file mode 100644 index 00000000000..8ea2d5b4781 --- /dev/null +++ b/bindings/go/pkg/whisper/util_test.go @@ -0,0 +1,6 @@ +package whisper_test + +const ( + ModelPath = "../../models/ggml-small.en.bin" + SamplePath = "../../samples/jfk.wav" +) diff --git a/bindings/go/whisper.go b/bindings/go/whisper.go index 87da83f0f10..3ef73414d90 100644 --- a/bindings/go/whisper.go +++ b/bindings/go/whisper.go @@ -9,7 +9,9 @@ import ( // CGO /* -#cgo LDFLAGS: -lwhisper -lm -lstdc++ +#cgo LDFLAGS: -lwhisper -lggml -lggml-base -lggml-cpu -lm -lstdc++ +#cgo linux LDFLAGS: -fopenmp +#cgo darwin LDFLAGS: -lggml-metal -lggml-blas #cgo darwin LDFLAGS: -framework Accelerate -framework Metal -framework Foundation -framework CoreGraphics #include #include diff --git a/bindings/ios b/bindings/ios deleted file mode 160000 index a2085436c2e..00000000000 --- a/bindings/ios +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 
a2085436c2eb796af90956b62bd64731f5e5b823
diff --git a/bindings/java/README.md b/bindings/java/README.md
index 5255612e398..90426997237 100644
--- a/bindings/java/README.md
+++ b/bindings/java/README.md
@@ -23,26 +23,42 @@ import io.github.ggerganov.whispercpp.WhisperCpp;
 public class Example {
 
     public static void main(String[] args) {
+        WhisperCpp whisper = new WhisperCpp();
-        // By default, models are loaded from ~/.cache/whisper/ and are usually named "ggml-${name}.bin"
-        // or you can provide the absolute path to the model file.
-        long context = whisper.initContext("base.en");
         try {
-            var whisperParams = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
-            // custom configuration if required
-            whisperParams.temperature_inc = 0f;
+            // By default, models are loaded from ~/.cache/whisper/ and are usually named "ggml-${name}.bin"
+            // or you can provide the absolute path to the model file.
+            whisper.initContext("../ggml-base.en.bin");
+            WhisperFullParams.ByValue whisperParams = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
 
-            var samples = readAudio(); // divide each value by 32767.0f
-            whisper.fullTranscribe(whisperParams, samples);
+            // custom configuration if required
+            //whisperParams.n_threads = 8;
+            whisperParams.temperature = 0.0f;
+            whisperParams.temperature_inc = 0.2f;
+            //whisperParams.language = "en";
+
+            float[] samples = readAudio(); // divide each value by 32767.0f
+            List<WhisperSegment> whisperSegmentList = whisper.fullTranscribeWithTime(whisperParams, samples);
 
-            int segmentCount = whisper.getTextSegmentCount(context);
-            for (int i = 0; i < segmentCount; i++) {
-                String text = whisper.getTextSegment(context, i);
-                System.out.println(segment.getText());
+            for (WhisperSegment whisperSegment : whisperSegmentList) {
+
+                long start = whisperSegment.getStart();
+                long end = whisperSegment.getEnd();
+
+                String text = whisperSegment.getSentence();
+
+                System.out.println("start: "+start);
+                System.out.println("end: "+end);
+                System.out.println("text: "+text);
+            }
+
+        } catch (IOException e) {
+            e.printStackTrace();
         } finally {
-            whisper.freeContext(context);
+            whisper.close();
         }
+
     }
}
```
@@ -52,7 +68,7 @@ public class Example {
In order to build, you need to have the JDK 8 or higher installed. Run the tests with:

```bash
-git clone https://github.com/ggerganov/whisper.cpp.git
+git clone https://github.com/ggml-org/whisper.cpp.git
cd whisper.cpp/bindings/java

./gradlew build
@@ -67,5 +83,5 @@ copy /y ..\..\build\bin\Release\whisper.dll build\generated\resources\main\win32

## License

-The license for the Go bindings is the same as the license for the rest of the whisper.cpp project, which is the MIT License. See the `LICENSE` file for more details.
+The license for the Java bindings is the same as the license for the rest of the whisper.cpp project, which is the MIT License. See the `LICENSE` file for more details.
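
Since context parameters moved to a JNA `ByValue` struct in this patch, initialising a context with custom parameters now follows the same pattern. A minimal sketch, assuming a model file at a placeholder path:

```java
import io.github.ggerganov.whispercpp.WhisperCpp;
import io.github.ggerganov.whispercpp.params.WhisperContextParams;

public class ContextParamsExample {
    public static void main(String[] args) throws Exception {
        WhisperCpp whisper = new WhisperCpp();
        try {
            // Defaults now come back as a ByValue struct, so they can be
            // passed straight through to the native init call.
            WhisperContextParams.ByValue params = whisper.getContextDefaultParams();
            params.useGpu(false); // e.g. force CPU-only inference
            whisper.initContext("../ggml-base.en.bin", params); // placeholder model path
        } finally {
            whisper.close();
        }
    }
}
```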
diff --git a/bindings/java/build.gradle b/bindings/java/build.gradle index 75f3a9cd901..30184eed7ea 100644 --- a/bindings/java/build.gradle +++ b/bindings/java/build.gradle @@ -25,25 +25,43 @@ sourceSets { } tasks.register('copyLibwhisperDynlib', Copy) { - from '../../build' - include 'libwhisper.dynlib' - into 'build/generated/resources/main/darwin' + from '../../build/src' + include 'libwhisper.dylib' + into 'build/generated/resources/main' } tasks.register('copyLibwhisperSo', Copy) { - from '../../build' + from '../../build/src' include 'libwhisper.so' - into 'build/generated/resources/main/linux-x86-64' + into 'build/generated/resources/main' } -tasks.register('copyWhisperDll', Copy) { - from '../../build/Release' +tasks.register('copyWhisperDLL', Copy) { + from '../../build/bin/Release' include 'whisper.dll' - into 'build/generated/resources/main/windows-x86-64' + into 'build/generated/resources/main' +} + +tasks.register('copyGGML_BASE_DLL', Copy) { + from '../../build/bin/Release' + include 'ggml-base.dll' + into 'build/generated/resources/main' +} + +tasks.register('copyGGML_DLL', Copy) { + from '../../build/bin/Release' + include 'ggml.dll' + into 'build/generated/resources/main' +} + +tasks.register('copyGGML_CPU_DLL', Copy) { + from '../../build/bin/Release' + include 'ggml-cpu.dll' + into 'build/generated/resources/main' } tasks.register('copyLibs') { - dependsOn copyLibwhisperDynlib, copyLibwhisperSo, copyWhisperDll + dependsOn copyLibwhisperDynlib, copyLibwhisperSo, copyWhisperDLL, copyGGML_BASE_DLL, copyGGML_DLL, copyGGML_CPU_DLL } test { @@ -55,7 +73,12 @@ java { withJavadocJar() } +sourcesJar() { + dependsOn copyLibs +} + jar { + dependsOn copyLibs exclude '**/whisper_java.exp', '**/whisper_java.lib' } @@ -67,6 +90,9 @@ tasks.withType(Test) { useJUnitPlatform() } +test.dependsOn copyLibs +processResources.dependsOn copyLibs + dependencies { implementation "net.java.dev.jna:jna:5.13.0" testImplementation "org.junit.jupiter:junit-jupiter:5.9.2" diff --git a/bindings/java/gradlew b/bindings/java/gradlew old mode 100644 new mode 100755 diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperConstants.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperConstants.java new file mode 100644 index 00000000000..0c828f1deef --- /dev/null +++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperConstants.java @@ -0,0 +1,24 @@ +package io.github.ggerganov.whispercpp; + +/** + * Presets for alignment heads in DTW token timestamps + */ +public class WhisperConstants { + // Alignment heads presets + public static final int WHISPER_AHEADS_NONE = 0; + public static final int WHISPER_AHEADS_TINY_EN = 1; + public static final int WHISPER_AHEADS_TINY = 2; + public static final int WHISPER_AHEADS_BASE_EN = 3; + public static final int WHISPER_AHEADS_BASE = 4; + public static final int WHISPER_AHEADS_SMALL_EN = 5; + public static final int WHISPER_AHEADS_SMALL = 6; + public static final int WHISPER_AHEADS_MEDIUM_EN = 7; + public static final int WHISPER_AHEADS_MEDIUM = 8; + public static final int WHISPER_AHEADS_LARGE_V1 = 9; + public static final int WHISPER_AHEADS_LARGE_V2 = 10; + public static final int WHISPER_AHEADS_LARGE_V3 = 11; + public static final int WHISPER_AHEADS_LARGE_V3_TURBO = 12; + public static final int WHISPER_AHEADS_CUSTOM = 13; + public static final int WHISPER_AHEADS_N_TOP_MOST = 14; + public static final int WHISPER_AHEADS_COUNT = 15; +} diff --git 
a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperContext.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperContext.java
index 0498eb4df81..7ac124edbe6 100644
--- a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperContext.java
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperContext.java
@@ -1,7 +1,9 @@
 package io.github.ggerganov.whispercpp;
 
+import com.sun.jna.NativeLong;
 import com.sun.jna.Structure;
 import com.sun.jna.ptr.PointerByReference;
+import com.sun.jna.Pointer;
 import io.github.ggerganov.whispercpp.ggml.GgmlType;
 import io.github.ggerganov.whispercpp.WhisperModel;
 import io.github.ggerganov.whispercpp.params.WhisperContextParams;
@@ -9,33 +11,26 @@ import java.util.List;
 
 public class WhisperContext extends Structure {
-    int t_load_us = 0;
-    int t_start_us = 0;
+    public NativeLong t_load_us;
+    public NativeLong t_start_us;
 
     /** weight type (FP32 / FP16 / QX) */
-    GgmlType wtype = GgmlType.GGML_TYPE_F16;
+    public GgmlType wtype = GgmlType.GGML_TYPE_F16;
     /** intermediate type (FP32 or FP16) */
-    GgmlType itype = GgmlType.GGML_TYPE_F16;
+    public GgmlType itype = GgmlType.GGML_TYPE_F16;
 
-//    WhisperModel model;
-    public PointerByReference model;
-//    whisper_vocab vocab;
-//    whisper_state * state = nullptr;
-    public PointerByReference vocab;
-    public PointerByReference state;
+    public WhisperContextParams.ByValue params;
+
+    public Pointer model;
+    public Pointer vocab;
+    public Pointer state;
 
     /** populated by whisper_init_from_file_with_params() */
-    String path_model;
-    WhisperContextParams params;
-
-//    public static class ByReference extends WhisperContext implements Structure.ByReference {
-//    }
-//
-//    public static class ByValue extends WhisperContext implements Structure.ByValue {
-//    }
-//
-//    @Override
-//    protected List getFieldOrder() {
-//        return List.of("t_load_us", "t_start_us", "wtype", "itype", "model", "vocab", "state", "path_model");
-//    }
+    public Pointer path_model;
+
+    @Override
+    protected List<String> getFieldOrder() {
+        return List.of("t_load_us", "t_start_us", "wtype", "itype",
+                "params", "model", "vocab", "state", "path_model");
+    }
}
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCpp.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCpp.java
index 4c1594d5d55..cc5314829c6 100644
--- a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCpp.java
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCpp.java
@@ -43,11 +43,11 @@ public void initContext(String modelPath) throws FileNotFoundException {
      * @param modelPath - absolute path, or just the name (eg: "base", "base-en" or "base.en")
      * @param params - params to use when initialising the context
      */
-    public void initContext(String modelPath, WhisperContextParams params) throws FileNotFoundException {
+    public void initContext(String modelPath, WhisperContextParams.ByValue params) throws FileNotFoundException {
         initContextImpl(modelPath, params);
     }
 
-    private void initContextImpl(String modelPath, WhisperContextParams params) throws FileNotFoundException {
+    private void initContextImpl(String modelPath, WhisperContextParams.ByValue params) throws FileNotFoundException {
         if (ctx != null) {
             lib.whisper_free(ctx);
         }
@@ -69,15 +69,13 @@ private void initContextImpl(String modelPath, WhisperContextParams params) thro
 
     /**
      * Provides default params which can be used with `whisper_init_from_file_with_params()` etc. 
- * Because this function allocates memory for the params, the caller must call either: - * - call `whisper_free_context_params()` - * - `Native.free(Pointer.nativeValue(pointer));` + * Returns a ByValue instance to ensure proper parameter passing to native code. */ - public WhisperContextParams getContextDefaultParams() { - paramsPointer = lib.whisper_context_default_params_by_ref(); - WhisperContextParams params = new WhisperContextParams(paramsPointer); - params.read(); - return params; + public WhisperContextParams.ByValue getContextDefaultParams() { + WhisperContextParams.ByValue valueParams = new WhisperContextParams.ByValue( + lib.whisper_context_default_params_by_ref()); + valueParams.read(); + return valueParams; } /** @@ -88,7 +86,7 @@ public WhisperContextParams getContextDefaultParams() { * * @param strategy - GREEDY */ - public WhisperFullParams getFullDefaultParams(WhisperSamplingStrategy strategy) { + public WhisperFullParams.ByValue getFullDefaultParams(WhisperSamplingStrategy strategy) { Pointer pointer; // whisper_full_default_params_by_ref allocates memory which we need to delete, so only create max 1 pointer for each strategy. @@ -104,7 +102,7 @@ public WhisperFullParams getFullDefaultParams(WhisperSamplingStrategy strategy) pointer = beamParamsPointer; } - WhisperFullParams params = new WhisperFullParams(pointer); + WhisperFullParams.ByValue params = new WhisperFullParams.ByValue(pointer); params.read(); return params; } @@ -138,15 +136,21 @@ private void freeParams() { } /** - * Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text. + * Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text. * Not thread safe for same context * Uses the specified decoding strategy to obtain the text. */ - public String fullTranscribe(WhisperFullParams whisperParams, float[] audioData) throws IOException { + public String fullTranscribe(WhisperFullParams.ByValue whisperParams, float[] audioData) throws IOException { if (ctx == null) { throw new IllegalStateException("Model not initialised"); } + /* + WhisperFullParams.ByValue valueParams = new WhisperFullParams.ByValue( + lib.whisper_full_default_params_by_ref(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH.ordinal())); + valueParams.read(); + */ + if (lib.whisper_full(ctx, whisperParams, audioData, audioData.length) != 0) { throw new IOException("Failed to process audio"); } @@ -163,7 +167,16 @@ public String fullTranscribe(WhisperFullParams whisperParams, float[] audioData) return str.toString().trim(); } - public List fullTranscribeWithTime(WhisperFullParams whisperParams, float[] audioData) throws IOException { + + /** + * Full transcribe with time list. 
+ * + * @param whisperParams the whisper params + * @param audioData the audio data + * @return the list + * @throws IOException the io exception + */ + public List fullTranscribeWithTime(WhisperFullParams.ByValue whisperParams, float[] audioData) throws IOException { if (ctx == null) { throw new IllegalStateException("Model not initialised"); } @@ -175,7 +188,6 @@ public List fullTranscribeWithTime(WhisperFullParams whisperPara int nSegments = lib.whisper_full_n_segments(ctx); List segments= new ArrayList<>(nSegments); - for (int i = 0; i < nSegments; i++) { long t0 = lib.whisper_full_get_segment_t0(ctx, i); String text = lib.whisper_full_get_segment_text(ctx, i); diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCppJnaLibrary.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCppJnaLibrary.java index 1a73cee1181..690f1bd5258 100644 --- a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCppJnaLibrary.java +++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/WhisperCppJnaLibrary.java @@ -9,6 +9,7 @@ import io.github.ggerganov.whispercpp.params.WhisperFullParams; public interface WhisperCppJnaLibrary extends Library { + WhisperCppJnaLibrary instance = Native.load("whisper", WhisperCppJnaLibrary.class); String whisper_print_system_info(); @@ -38,7 +39,7 @@ public interface WhisperCppJnaLibrary extends Library { * @param params Pointer to whisper_context_params * @return Whisper context on success, null on failure */ - Pointer whisper_init_from_file_with_params(String path_model, WhisperContextParams params); + Pointer whisper_init_from_file_with_params(String path_model, WhisperContextParams.ByValue params); /** * Allocate (almost) all memory needed for the model by loading from a buffer. @@ -180,12 +181,12 @@ public interface WhisperCppJnaLibrary extends Library { /** * @return the id of the specified language, returns -1 if not found. * Examples: - * "de" -> 2 - * "german" -> 2 + * "de" -> 2 + * "german" -> 2 */ int whisper_lang_id(String lang); - /** @return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found */ + /** @return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found */ String whisper_lang_str(int id); /** @@ -268,20 +269,21 @@ public interface WhisperCppJnaLibrary extends Library { void whisper_free_params(Pointer params); /** - * Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text + * Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text * Not thread safe for same context * Uses the specified decoding strategy to obtain the text. */ - int whisper_full(Pointer ctx, WhisperFullParams params, final float[] samples, int n_samples); + int whisper_full(Pointer ctx, WhisperFullParams.ByValue params, final float[] samples, int n_samples); - int whisper_full_with_state(Pointer ctx, Pointer state, WhisperFullParams params, final float[] samples, int n_samples); + public int whisper_full_with_state(Pointer ctx, Pointer state, WhisperFullParams.ByValue params, float[] samples, int n_samples); + //int whisper_full_with_state(Pointer ctx, Pointer state, WhisperFullParams params, final float[] samples, int n_samples); // Split the input audio in chunks and process each chunk separately using whisper_full_with_state() // Result is stored in the default state of the context // Not thread safe if executed in parallel on the same context. 
     // It seems this approach can offer some speedup in some cases.
     // However, the transcription accuracy can be worse at the beginning and end of each chunk.
-    int whisper_full_parallel(Pointer ctx, WhisperFullParams params, final float[] samples, int n_samples, int n_processors);
+    int whisper_full_parallel(Pointer ctx, WhisperFullParams.ByValue params, final float[] samples, int n_samples, int n_processors);
 
     /**
      * Number of generated text segments.
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/callbacks/GgmlAbortCallback.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/callbacks/GgmlAbortCallback.java
new file mode 100644
index 00000000000..244e4191f9a
--- /dev/null
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/callbacks/GgmlAbortCallback.java
@@ -0,0 +1,17 @@
+package io.github.ggerganov.whispercpp.callbacks;
+
+import com.sun.jna.Callback;
+
+/**
+ * Callback for aborting GGML computation
+ * Maps to the C typedef: bool (*ggml_abort_callback)(void * data)
+ */
+public interface GgmlAbortCallback extends Callback {
+    /**
+     * Return true to abort the computation, false to continue
+     *
+     * @param data User data passed to the callback
+     * @return true to abort, false to continue
+     */
+    boolean invoke(com.sun.jna.Pointer data);
+}
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAhead.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAhead.java
new file mode 100644
index 00000000000..39691dcb667
--- /dev/null
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAhead.java
@@ -0,0 +1,30 @@
+package io.github.ggerganov.whispercpp.params;
+import com.sun.jna.*;
+import java.util.Arrays;
+import java.util.List;
+
+public class WhisperAhead extends Structure {
+
+    public int n_text_layer;
+
+    public int n_head;
+
+    public WhisperAhead() {
+        super();
+    }
+
+    public WhisperAhead(int textLayer, int head) {
+        super();
+        this.n_text_layer = textLayer;
+        this.n_head = head;
+    }
+
+    @Override
+    protected List<String> getFieldOrder() {
+        return Arrays.asList("n_text_layer", "n_head");
+    }
+
+    public static class ByReference extends WhisperAhead implements Structure.ByReference {}
+
+    public static class ByValue extends WhisperAhead implements Structure.ByValue {}
+}
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAheads.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAheads.java
new file mode 100644
index 00000000000..bca5eb0a17a
--- /dev/null
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperAheads.java
@@ -0,0 +1,41 @@
+package io.github.ggerganov.whispercpp.params;
+import com.sun.jna.*;
+import java.util.Arrays;
+import java.util.List;
+
+public class WhisperAheads extends Structure {
+    public NativeLong n_heads;
+
+    public Pointer heads;
+
+    public WhisperAheads() {
+        super();
+    }
+
+    /**
+     * Create alignment heads from an array of WhisperAhead objects
+     */
+    public void setHeads(WhisperAhead[] aheadsArray) {
+        this.n_heads = new NativeLong(aheadsArray.length);
+
+        int structSize = aheadsArray[0].size();
+        Memory mem = new Memory(structSize * aheadsArray.length);
+
+        for (int i = 0; i < aheadsArray.length; i++) {
+            aheadsArray[i].write();
+            byte[] buffer = aheadsArray[i].getPointer().getByteArray(0, structSize);
+            mem.write(i * structSize, buffer, 0, buffer.length);
+        }
+
+        this.heads = mem;
+    }
+
+    @Override
+    protected List<String> getFieldOrder() {
+        return Arrays.asList("n_heads", "heads");
+    }
+
+    public static class ByReference extends WhisperAheads implements Structure.ByReference {}
+
+    public static class ByValue extends WhisperAheads implements Structure.ByValue {}
+}
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperContextParams.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperContextParams.java
index cf98d2c3757..66ec5d70465 100644
--- a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperContextParams.java
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperContextParams.java
@@ -1,7 +1,5 @@
 package io.github.ggerganov.whispercpp.params;
-
 import com.sun.jna.*;
-
 import java.util.Arrays;
 import java.util.List;
 
@@ -11,21 +9,73 @@
  * whisper_context_default_params()
  */
 public class WhisperContextParams extends Structure {
-    public WhisperContextParams(Pointer p) { super(p); }
 
-    /** Use GPU for inference Number (default = true) */
+    public WhisperContextParams(Pointer p) {
+        super(p);
+    }
+
+    public WhisperContextParams() {
+        super();
+    }
+
+    /** Use GPU for inference (default = true) */
     public CBool use_gpu;
 
-    /** Use GPU for inference Number (default = true) */
+    /** Use flash attention (default = true) */
+    public CBool flash_attn;
+
+    /** CUDA device to use (default = 0) */
+    public int gpu_device;
+
+    /** [EXPERIMENTAL] Enable token-level timestamps with DTW (default = false) */
+    public CBool dtw_token_timestamps;
+
+    /** [EXPERIMENTAL] Alignment heads preset for DTW */
+    public int dtw_aheads_preset;
+
+    /** Number of top layers to use for DTW when using WHISPER_AHEADS_N_TOP_MOST preset */
+    public int dtw_n_top;
+
+    public WhisperAheads.ByValue dtw_aheads;
+
+    /** DTW memory size (internal use) */
+    public NativeLong dtw_mem_size;
+
+    /** Use GPU for inference */
     public void useGpu(boolean enable) {
         use_gpu = enable ? CBool.TRUE : CBool.FALSE;
     }
 
+    /** Use flash attention */
+    public void useFlashAttn(boolean enable) {
+        flash_attn = enable ? CBool.TRUE : CBool.FALSE;
+    }
+
+    /** Enable DTW token-level timestamps */
+    public void enableDtwTokenTimestamps(boolean enable) {
+        dtw_token_timestamps = enable ? CBool.TRUE : CBool.FALSE;
+    }
+
+    /** Set DTW alignment heads preset */
+    public void setDtwAheadsPreset(int preset) {
+        dtw_aheads_preset = preset;
+    }
+
     @Override
     protected List<String> getFieldOrder() {
-        return Arrays.asList("use_gpu");
+        return Arrays.asList(
+            "use_gpu",
+            "flash_attn",
+            "gpu_device",
+            "dtw_token_timestamps",
+            "dtw_aheads_preset",
+            "dtw_n_top",
+            "dtw_aheads",
+            "dtw_mem_size"
+        );
+    }
+
+    public static class ByValue extends WhisperContextParams implements Structure.ByValue {
+        public ByValue() { super(); }
+        public ByValue(Pointer p) { super(p); }
     }
 }
diff --git a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperFullParams.java b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperFullParams.java
index 90d8c15767c..76ce80fb4cc 100644
--- a/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperFullParams.java
+++ b/bindings/java/src/main/java/io/github/ggerganov/whispercpp/params/WhisperFullParams.java
@@ -5,6 +5,7 @@
 import io.github.ggerganov.whispercpp.callbacks.WhisperLogitsFilterCallback;
 import io.github.ggerganov.whispercpp.callbacks.WhisperNewSegmentCallback;
 import io.github.ggerganov.whispercpp.callbacks.WhisperProgressCallback;
+import io.github.ggerganov.whispercpp.callbacks.GgmlAbortCallback;
 
 import java.util.Arrays;
 import java.util.List;
@@ -16,10 +17,12 @@
  */
 public class WhisperFullParams extends Structure {
 
+    public WhisperFullParams() {
+        super();
+    }
+
     public WhisperFullParams(Pointer p) {
         super(p);
-//        super(p, ALIGN_MSVC);
-//        super(p, ALIGN_GNUC);
     }
 
     /** Sampling strategy for whisper_full() function. */
@@ -69,10 +72,10 @@ public void singleSegment(boolean single) {
         single_segment = single ? CBool.TRUE : CBool.FALSE;
     }
 
-    /** Flag to print special tokens (e.g., <SOT>, <EOT>, <BEG>, etc.). (default = false) */
+    /** Flag to print special tokens (e.g., &lt;SOT&gt;, &lt;EOT&gt;, &lt;BEG&gt;, etc.). (default = false) */
     public CBool print_special;
 
-    /** Flag to print special tokens (e.g., <SOT>, <EOT>, <BEG>, etc.). (default = false) */
+    /** Flag to print special tokens (e.g., &lt;SOT&gt;, &lt;EOT&gt;, &lt;BEG&gt;, etc.). (default = false) */
     public void printSpecial(boolean enable) {
         print_special = enable ? CBool.TRUE : CBool.FALSE;
     }
@@ -129,6 +132,14 @@ public void splitOnWord(boolean enable) {
     /** Maximum tokens per segment (0, default = no limit) */
     public int max_tokens;
 
+    /** [EXPERIMENTAL] Enable debug mode for extra info */
+    public CBool debug_mode;
+
+    /** Enable debug mode */
+    public void enableDebugMode(boolean enable) {
+        debug_mode = enable ? CBool.TRUE : CBool.FALSE;
+    }
+
     /** Overwrite the audio context size (0 = use default). */
     public int audio_ctx;
 
@@ -146,6 +157,8 @@ public void tdrzEnable(boolean enable) {
     /** Tokens to provide to the whisper decoder as an initial prompt.
      * These are prepended to any existing text context from a previous call. */
     public String initial_prompt;
+    /** Always prepend initial_prompt for every decode chunk. */
+    public CBool carry_initial_prompt;
 
     /** Prompt tokens. (int*) */
     public Pointer prompt_tokens;
@@ -181,11 +194,11 @@ public void suppressBlanks(boolean enable) {
     }
 
     /** Flag to suppress non-speech tokens. */
-    public CBool suppress_non_speech_tokens;
+    public CBool suppress_nst;
 
     /** Flag to suppress non-speech tokens. */
     public void suppressNonSpeechTokens(boolean enable) {
-        suppress_non_speech_tokens = enable ? CBool.TRUE : CBool.FALSE;
+        suppress_nst = enable ? CBool.TRUE : CBool.FALSE;
     }
 
     /** Initial decoding temperature. */
@@ -274,6 +287,16 @@ public void setBeamSizeAndPatience(int beamSize, float patience) {
      */
     public Pointer encoder_begin_callback_user_data;
 
+    /** Callback used to abort GGML computation */
+    public Pointer abort_callback;
+
+    /** User data for the abort_callback */
+    public Pointer abort_callback_user_data;
+
+    public void setAbortCallback(GgmlAbortCallback callback) {
+        abort_callback = CallbackReference.getFunctionPointer(callback);
+    }
+
     /**
      * Callback by each decoder to filter obtained logits.
      * WhisperLogitsFilterCallback
@@ -310,17 +333,28 @@ public void setLogitsFilterCallback(WhisperLogitsFilterCallback callback) {
 
     @Override
     protected List<String> getFieldOrder() {
-        return Arrays.asList("strategy", "n_threads", "n_max_text_ctx", "offset_ms", "duration_ms", "translate",
-                "no_context", "single_segment", "no_timestamps",
-                "print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
-                "thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
-                "tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
-                "suppress_blank", "suppress_non_speech_tokens", "temperature", "max_initial_ts", "length_penalty",
-                "temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
-                "new_segment_callback", "new_segment_callback_user_data",
+        return Arrays.asList("strategy", "n_threads", "n_max_text_ctx",
+                "offset_ms", "duration_ms", "translate", "no_context",
+                "no_timestamps", "single_segment", "print_special",
+                "print_progress", "print_realtime", "print_timestamps",
+                "token_timestamps", "thold_pt", "thold_ptsum", "max_len",
+                "split_on_word", "max_tokens", "debug_mode", "audio_ctx",
+                "tdrz_enable", "suppress_regex", "initial_prompt", "carry_initial_prompt",
+                "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
+                "suppress_blank", "suppress_nst", "temperature",
+                "max_initial_ts", "length_penalty", "temperature_inc",
+                "entropy_thold", "logprob_thold", "no_speech_thold", "greedy",
+                "beam_search", "new_segment_callback", "new_segment_callback_user_data",
                 "progress_callback", "progress_callback_user_data",
                 "encoder_begin_callback", "encoder_begin_callback_user_data",
+                "abort_callback", "abort_callback_user_data",
                 "logits_filter_callback", "logits_filter_callback_user_data",
                 "grammar_rules", "n_grammar_rules", "i_start_rule", "grammar_penalty");
     }
+
+    public static class ByValue extends WhisperFullParams implements Structure.ByValue {
+        public ByValue() { super(); }
+        public ByValue(Pointer p) { super(p); }
+    }
+
 }
diff --git a/bindings/java/src/test/java/io/github/ggerganov/whispercpp/WhisperCppTest.java b/bindings/java/src/test/java/io/github/ggerganov/whispercpp/WhisperCppTest.java
index 034726ad29c..e5b22cf8de8 100644
--- a/bindings/java/src/test/java/io/github/ggerganov/whispercpp/WhisperCppTest.java
+++ b/bindings/java/src/test/java/io/github/ggerganov/whispercpp/WhisperCppTest.java
@@ -4,6 +4,7 @@
 
 import io.github.ggerganov.whispercpp.bean.WhisperSegment;
 import io.github.ggerganov.whispercpp.params.CBool;
+import io.github.ggerganov.whispercpp.params.WhisperContextParams;
 import io.github.ggerganov.whispercpp.params.WhisperFullParams;
 import io.github.ggerganov.whispercpp.params.WhisperSamplingStrategy;
 import org.junit.jupiter.api.BeforeAll;
@@ -25,7 +26,9 @@ static void init() throws FileNotFoundException {
         //String modelName = "../../models/ggml-tiny.bin";
         String modelName = "../../models/ggml-tiny.en.bin";
         try {
-            whisper.initContext(modelName);
+            WhisperContextParams.ByValue contextParams = whisper.getContextDefaultParams();
+            contextParams.useFlashAttn(false); // Disable flash attention
+            whisper.initContext(modelName, contextParams);
             //whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
             //whisper.getJavaDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
             modelInitialised = true;
@@ -76,7 +79,7 @@ void testFullTranscribe() throws Exception {
         float[] floats = new float[b.length / 2];
 
         //WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
-        WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
+        WhisperFullParams.ByValue params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
         params.setProgressCallback((ctx, state, progress, user_data) -> System.out.println("progress: " + progress));
         params.print_progress = CBool.FALSE;
         //params.initial_prompt = "and so my fellow Americans um, like";
@@ -118,7 +121,7 @@ void testFullTranscribeWithTime() throws Exception {
         float[] floats = new float[b.length / 2];
 
         //WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
-        WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
+        WhisperFullParams.ByValue params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
         params.setProgressCallback((ctx, state, progress, user_data) -> System.out.println("progress: " + progress));
         params.print_progress = CBool.FALSE;
         //params.initial_prompt = "and so my fellow Americans um, like";
diff --git a/bindings/javascript/README.md b/bindings/javascript/README.md
index 87f3480574c..5e726e13d2c 100644
--- a/bindings/javascript/README.md
+++ b/bindings/javascript/README.md
@@ -33,6 +33,9 @@ mkdir build-em && cd build-em
 emcmake cmake .. && make -j
 
 # run test
+node ../tests/test-whisper.js
+
+# For Node.js versions prior to v16.4.0, experimental features need to be enabled:
 node --experimental-wasm-threads --experimental-wasm-simd ../tests/test-whisper.js
 
 # publish npm package
diff --git a/bindings/javascript/package.json b/bindings/javascript/package.json
index 2b3c806f353..37bc75098f9 100644
--- a/bindings/javascript/package.json
+++ b/bindings/javascript/package.json
@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.6.2",
+  "version": "1.8.2",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {
diff --git a/bindings/ruby/.gitignore b/bindings/ruby/.gitignore
new file mode 100644
index 00000000000..54e3a2ac184
--- /dev/null
+++ b/bindings/ruby/.gitignore
@@ -0,0 +1,9 @@
+LICENSE
+pkg/
+lib/whisper.*
+ext/examples/
+ext/ggml/
+ext/include/
+ext/scripts/
+ext/src/
+test/fixtures/
diff --git a/bindings/ruby/README.md b/bindings/ruby/README.md
new file mode 100644
index 00000000000..fff6efc7c5c
--- /dev/null
+++ b/bindings/ruby/README.md
@@ -0,0 +1,349 @@
+whispercpp
+==========
+
+![whisper.cpp](https://user-images.githubusercontent.com/1991296/235238348-05d0f6a4-da44-4900-a1de-d0707e75b763.jpeg)
+
+Ruby bindings for [whisper.cpp][], an interface to an automatic speech recognition model.
+
+Installation
+------------
+
+Install the gem and add it to the application's Gemfile by executing:
+
+    $ bundle add whispercpp
+
+If bundler is not being used to manage dependencies, install the gem by executing:
+
+    $ gem install whispercpp
+
+You can pass build options for whisper.cpp, for instance:
+
+    $ bundle config build.whispercpp --enable-ggml-cuda
+
+or,
+
+    $ gem install whispercpp -- --enable-ggml-cuda
+
+See whisper.cpp's [README](https://github.com/ggml-org/whisper.cpp/blob/master/README.md) for the available options. You need to convert the options presented in that README to Ruby-style options, for example:
+
+Boolean options:
+
+* `-DGGML_BLAS=1` -> `--enable-ggml-blas`
+* `-DWHISPER_COREML=OFF` -> `--disable-whisper-coreml`
+
+Argument options:
+
+* `-DGGML_CUDA_COMPRESSION_MODE=size` -> `--ggml-cuda-compression-mode=size`
+
+Combination:
+
+* `-DGGML_CUDA=1 -DCMAKE_CUDA_ARCHITECTURES="86"` -> `--enable-ggml-cuda --cmake-cuda-architectures="86"`
+
+For boolean options like `GGML_CUDA`, the README says `-DGGML_CUDA=1`. You need to strip `-D`, prepend `--enable-` for `1` or `ON` (`--disable-` for `0` or `OFF`), and convert the name to kebab-case: `--enable-ggml-cuda`.
+For options which take arguments, like `CMAKE_CUDA_ARCHITECTURES`, the README says `-DCMAKE_CUDA_ARCHITECTURES="86"`. You need to strip `-D`, prepend `--`, convert the name to kebab-case, append `=`, and append the argument: `--cmake-cuda-architectures="86"`.
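+
+As an illustration only, the conversion just described can be sketched in Ruby (this helper is hypothetical and not part of the gem):
+
+```ruby
+# Hypothetical sketch of the CMake-option to gem-option mapping described above
+def to_gem_option(cmake_option)
+  name, value = cmake_option.delete_prefix("-D").split("=", 2)
+  kebab = name.downcase.tr("_", "-")
+  case value
+  when "1", "ON"  then "--enable-#{kebab}"
+  when "0", "OFF" then "--disable-#{kebab}"
+  else                 "--#{kebab}=#{value}"
+  end
+end
+
+to_gem_option("-DGGML_BLAS=1")                   # => "--enable-ggml-blas"
+to_gem_option("-DWHISPER_COREML=OFF")            # => "--disable-whisper-coreml"
+to_gem_option('-DCMAKE_CUDA_ARCHITECTURES="86"') # => "--cmake-cuda-architectures=\"86\""
+```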
+
+Usage
+-----
+
+```ruby
+require "whisper"
+
+whisper = Whisper::Context.new("base")
+
+params = Whisper::Params.new(
+  language: "en",
+  offset: 10_000,
+  duration: 60_000,
+  max_text_tokens: 300,
+  translate: true,
+  print_timestamps: false,
+  initial_prompt: "Initial prompt here."
+)
+
+whisper.transcribe("path/to/audio.wav", params) do |whole_text|
+  puts whole_text
+end
+
+```
+
+### Preparing model ###
+
+Some models are prepared up-front. You can use shorthand names for these pre-converted models:
+
+```ruby
+whisper = Whisper::Context.new("base.en")
+```
+
+You can list the prepared model names via `Whisper::Model.pre_converted_models.keys`:
+
+```ruby
+puts Whisper::Model.pre_converted_models.keys
+# tiny
+# tiny.en
+# tiny-q5_1
+# tiny.en-q5_1
+# tiny-q8_0
+# base
+# base.en
+# base-q5_1
+# base.en-q5_1
+# base-q8_0
+#   :
+#   :
+```
+
+You can also retrieve each model:
+
+```ruby
+base_en = Whisper::Model.pre_converted_models["base.en"]
+whisper = Whisper::Context.new(base_en)
+```
+
+The first time you use a model, it is downloaded automatically. After that, the cached file is used. To clear the cache, call `#clear_cache`:
+
+```ruby
+Whisper::Model.pre_converted_models["base"].clear_cache
+```
+
+You can also use local model files you prepared:
+
+```ruby
+whisper = Whisper::Context.new("path/to/your/model.bin")
+```
+
+Or, you can download model files:
+
+```ruby
+whisper = Whisper::Context.new("https://example.net/uri/of/your/model.bin")
+# Or
+whisper = Whisper::Context.new(URI("https://example.net/uri/of/your/model.bin"))
+```
+
+See the [models][] page for details.
+
+### Preparing audio file ###
+
+Currently, whisper.cpp accepts only 16-bit WAV files. You can convert other formats up-front, for example with ffmpeg: `ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav`.
+
+### Voice Activity Detection (VAD) ###
+
+Support for Voice Activity Detection (VAD) can be enabled by setting `Whisper::Params`'s `vad` argument to `true` and specifying a VAD model:
+
+```ruby
+Whisper::Params.new(
+  vad: true,
+  vad_model_path: "silero-v5.1.2",
+  # other arguments...
+)
+```
+
+When you pass the model name (`"silero-v5.1.2"`) or its URI (`https://huggingface.co/ggml-org/whisper-vad/resolve/main/ggml-silero-v5.1.2.bin`), it will be downloaded automatically.
+Currently, "silero-v5.1.2" is registered as a pre-converted model, like the ASR models. You can also specify a file path or URI for the model.
+
+If you need to configure VAD behavior, pass VAD-specific params:
+
+```ruby
+Whisper::Params.new(
+  vad: true,
+  vad_model_path: "silero-v5.1.2",
+  vad_params: Whisper::VAD::Params.new(
+    threshold: 1.0,               # defaults to 0.5
+    min_speech_duration_ms: 500,  # defaults to 250
+    min_silence_duration_ms: 200, # defaults to 100
+    max_speech_duration_s: 30000, # defaults to FLT_MAX
+    speech_pad_ms: 50,            # defaults to 30
+    samples_overlap: 0.5          # defaults to 0.1
+  ),
+  # other arguments...
+)
+```
+
+For details on VAD, see [whisper.cpp's README](https://github.com/ggml-org/whisper.cpp?tab=readme-ov-file#voice-activity-detection-vad).
+
+### Output ###
+
+whispercpp supports SRT and WebVTT output:
+
+```ruby
+puts whisper.transcribe("path/to/audio.wav", Whisper::Params.new).to_webvtt
+# =>
+WEBVTT
+
+1
+00:00:00.000 --> 00:00:03.860
+ My thought I have nobody by a beauty and will as you poured.
+
+2
+00:00:03.860 --> 00:00:09.840
+ Mr. Rochester is sub in that so-don't find simplest, and devoted about, to let might in
+
+3
+00:00:09.840 --> 00:00:09.940
+ a
+
+```
+
+You may call `#to_srt`, too.
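+
+For example, to write SRT subtitles to a file (a minimal sketch; the output path is arbitrary):
+
+```ruby
+require "whisper"
+
+whisper = Whisper::Context.new("base")
+File.write("audio.srt", whisper.transcribe("path/to/audio.wav", Whisper::Params.new).to_srt)
+```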
+
+API
+---
+
+### Transcription ###
+
+By default, `Whisper::Context#transcribe` works in a single thread. You can make it work in parallel by passing the `n_processors` option:
+
+```ruby
+whisper.transcribe("path/to/audio.wav", params, n_processors: Etc.nprocessors)
+```
+
+Note that transcription accuracy can occasionally be lower when it runs in parallel.
+
+### Segments ###
+
+Once `Whisper::Context#transcribe` has been called, you can retrieve segments via `#each_segment`:
+
+```ruby
+def format_time(time_ms)
+  sec, decimal_part = time_ms.divmod(1000)
+  min, sec = sec.divmod(60)
+  hour, min = min.divmod(60)
+  "%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
+end
+
+whisper
+  .transcribe("path/to/audio.wav", params)
+  .each_segment.with_index do |segment, index|
+    line = "[%{nth}: %{st} --> %{ed}] %{text}" % {
+      nth: index + 1,
+      st: format_time(segment.start_time),
+      ed: format_time(segment.end_time),
+      text: segment.text
+    }
+    line << " (speaker turned)" if segment.speaker_turn_next?
+    puts line
+  end
+
+```
+
+You can also add a hook to the params that is called on each new segment:
+
+```ruby
+# Add the hook before calling #transcribe
+params.on_new_segment do |segment|
+  line = "[%{st} --> %{ed}] %{text}" % {
+    st: format_time(segment.start_time),
+    ed: format_time(segment.end_time),
+    text: segment.text
+  }
+  line << " (speaker turned)" if segment.speaker_turn_next?
+  puts line
+end
+
+whisper.transcribe("path/to/audio.wav", params)
+
+```
+
+### Models ###
+
+You can see model information:
+
+```ruby
+whisper = Whisper::Context.new("base")
+model = whisper.model
+
+model.n_vocab       # => 51864
+model.n_audio_ctx   # => 1500
+model.n_audio_state # => 512
+model.n_audio_head  # => 8
+model.n_audio_layer # => 6
+model.n_text_ctx    # => 448
+model.n_text_state  # => 512
+model.n_text_head   # => 8
+model.n_text_layer  # => 6
+model.n_mels        # => 80
+model.ftype         # => 1
+model.type          # => "base"
+
+```
+
+### Logging ###
+
+You can set a log callback:
+
+```ruby
+prefix = "[MyApp] "
+log_callback = ->(level, buffer, user_data) {
+  case level
+  when Whisper::LOG_LEVEL_NONE
+    puts "#{user_data}none: #{buffer}"
+  when Whisper::LOG_LEVEL_INFO
+    puts "#{user_data}info: #{buffer}"
+  when Whisper::LOG_LEVEL_WARN
+    puts "#{user_data}warn: #{buffer}"
+  when Whisper::LOG_LEVEL_ERROR
+    puts "#{user_data}error: #{buffer}"
+  when Whisper::LOG_LEVEL_DEBUG
+    puts "#{user_data}debug: #{buffer}"
+  when Whisper::LOG_LEVEL_CONT
+    puts "#{user_data}same to previous: #{buffer}"
+  end
+}
+Whisper.log_set log_callback, prefix
+```
+
+Using this feature, you can also suppress logging:
+
+```ruby
+Whisper.log_set ->(level, buffer, user_data) {
+  # do nothing
+}, nil
+Whisper::Context.new("base")
+```
+
+### Low-level API to transcribe ###
+
+You can also call `Whisper::Context#full` and `#full_parallel` with a Ruby array as the samples. Although `#transcribe` with an audio file path is recommended, because it extracts PCM samples in C++ and is fast, `#full` and `#full_parallel` give you more flexibility.
+
+```ruby
+require "whisper"
+require "wavefile"
+
+reader = WaveFile::Reader.new("path/to/audio.wav", WaveFile::Format.new(:mono, :float, 16000))
+samples = reader.enum_for(:each_buffer).map(&:samples).flatten
+
+whisper = Whisper::Context.new("base")
+whisper
+  .full(Whisper::Params.new, samples)
+  .each_segment do |segment|
+    puts segment.text
+  end
+```
+
+The second argument `samples` may be an array, an object with `length` and `each` methods, or a MemoryView. If you can prepare the audio data as a C array and export it as a MemoryView, whispercpp accepts it and works with it without copying (zero copy).
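+
+As a minimal sketch of that duck-typed contract (the `Silence` class below is illustrative, not part of the gem), any object whose `each` yields Float PCM samples and whose `length` reports their count works:
+
+```ruby
+require "whisper"
+
+# One second of silence at whisper.cpp's expected 16 kHz sample rate
+class Silence
+  SAMPLE_RATE = 16000
+
+  def length
+    SAMPLE_RATE
+  end
+
+  def each
+    length.times { yield 0.0 }
+  end
+end
+
+whisper = Whisper::Context.new("base")
+whisper.full(Whisper::Params.new, Silence.new)
+```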
+
+Development
+-----------
+
+    % git clone https://github.com/ggml-org/whisper.cpp.git
+    % cd whisper.cpp/bindings/ruby
+    % rake test
+
+The first call of `rake test` builds the extension and downloads a model for testing. After that, you can add tests in the `tests` directory and modify `ext/ruby_whisper.cpp`.
+
+If the build seems broken, running `rake clean` can fix some cases.
+
+### Need help ###
+
+* Windows support
+* Refinement of C/C++ code, especially memory management
+
+License
+-------
+
+The same as [whisper.cpp][].
+
+[whisper.cpp]: https://github.com/ggml-org/whisper.cpp
+[models]: https://github.com/ggml-org/whisper.cpp/tree/master/models
diff --git a/bindings/ruby/Rakefile b/bindings/ruby/Rakefile
index 354d8ef2547..d9a66030de4 100644
--- a/bindings/ruby/Rakefile
+++ b/bindings/ruby/Rakefile
@@ -1,12 +1,96 @@
 require 'rake/clean'
-require 'rubygems/package'
+require "bundler/gem_tasks"
+require "rake/testtask"
+require_relative "extsources"
 
-desc 'Build gem'
-task :package do
-  spec_source = File.read File.join(File.dirname(__FILE__),'whispercpp.gemspec')
-  spec = nil
-  # see: http://gist.github.com/16215
-  Thread.new { spec = eval("#{spec_source}") }.join
-  spec.validate
-  Gem::Package.build(spec)
+SOURCES_DIR = "ext/sources"
+
+SOURCES = FileList[]
+
+EXTSOURCES.each do |src|
+  basename = src.pathmap("%f")
+  dest = basename == "LICENSE" ? basename
+                               : src.pathmap("%{\\.\\./\\.\\.,#{SOURCES_DIR}}p")
+                                    .pathmap("%{\\.\\./javascript,#{SOURCES_DIR}/bindings/javascript}p")
+  dir = dest.pathmap("%d")
+  file src
+  directory dir
+  file dest => [src, dir] do |t|
+    cp t.source, t.name
+  end
+  SOURCES.include dest
+end
+
+CLEAN.include SOURCES
+
+SRC = FileList["ext/*.{c,cpp,h}"]
+
+task build: SOURCES
+
+directory "pkg"
+CLOBBER.include "pkg"
+
+LIB_NAME = "whisper".ext(RbConfig::CONFIG["DLEXT"])
+SO_FILE = File.join("ext", LIB_NAME)
+LIB_FILE = File.join("lib", LIB_NAME)
+
+file "ext/Makefile" => SRC + ["ext/extconf.rb"] + SOURCES do |t|
+  chdir "ext" do
+    ruby "extconf.rb"
+  end
+end
+if File.exist? "ext/Makefile"
+  task :make_clean do
+    cd "ext" do
+      sh "make", "clean"
+    end
+  end
+  task clean: :make_clean
+  task :make_distclean do
+    cd "ext" do
+      sh "make", "distclean"
+    end
+  end
+  task clobber: :make_distclean
 end
+
+file SO_FILE => "ext/Makefile" do |t|
+  chdir "ext" do
+    sh "make"
+  end
+end
+CLEAN.include SO_FILE
+
+directory "lib"
+file LIB_FILE => [SO_FILE, "lib"] do |t|
+  copy t.source, t.name
+end
+CLEAN.include LIB_FILE
+
+Rake::TestTask.new
+
+TEST_FIXTURE_AUDIO = "test/fixtures/jfk.wav"
+TEST_FIXTURE_AUDIO_SRC = File.expand_path(File.join(__dir__, "..", "..", "samples", "jfk.wav"))
+TEST_FIXTURE_AUDIO_DIR = TEST_FIXTURE_AUDIO.pathmap("%d")
+directory TEST_FIXTURE_AUDIO_DIR
+if File.exist? TEST_FIXTURE_AUDIO_SRC
+  file TEST_FIXTURE_AUDIO => [TEST_FIXTURE_AUDIO_SRC, TEST_FIXTURE_AUDIO_DIR] do |t|
+    symlink t.source, t.name
+  end
+else
+  require "open-uri"
+  file TEST_FIXTURE_AUDIO => TEST_FIXTURE_AUDIO_DIR do |t|
+    File.write t.name, URI("https://github.com/ggml-org/whisper.cpp/raw/refs/heads/master/samples/jfk.wav").read
+  end
+end
+
+TEST_MEMORY_VIEW = "test/jfk_reader/jfk_reader.#{RbConfig::CONFIG['DLEXT']}"
+file TEST_MEMORY_VIEW => "test/jfk_reader/jfk_reader.c" do |t|
+  chdir "test/jfk_reader" do
+    ruby "extconf.rb"
+    sh "make"
+  end
+end
+CLEAN.include TEST_MEMORY_VIEW
+
+task test: [LIB_FILE, TEST_MEMORY_VIEW, TEST_FIXTURE_AUDIO]
diff --git a/bindings/ruby/ext/.gitignore b/bindings/ruby/ext/.gitignore
index 9f9b7abd60f..6fd36e40e28 100644
--- a/bindings/ruby/ext/.gitignore
+++ b/bindings/ruby/ext/.gitignore
@@ -1,9 +1,9 @@
 Makefile
-ggml.c
-ggml.h
-ggml-alloc.c
-ggml-alloc.h
+whisper.so
 whisper.bundle
-whisper.cpp
-whisper.h
-dr_wav.h
+whisper.dll
+*.o
+*.a
+sources/*
+!sources/CMakeGraphVizOptions.cmake
+mkmf.log
diff --git a/bindings/ruby/ext/dependencies.rb b/bindings/ruby/ext/dependencies.rb
new file mode 100644
index 00000000000..2ba4b94b62b
--- /dev/null
+++ b/bindings/ruby/ext/dependencies.rb
@@ -0,0 +1,73 @@
+require "tsort"
+
+class Dependencies
+  include TSort
+
+  def initialize(cmake, options)
+    @cmake = cmake
+    @options = options
+    @static_lib_shape = nil
+    @nodes = {}
+    @graph = Hash.new {|h, k| h[k] = []}
+
+    generate_dot
+    parse_dot
+  end
+
+  def libs
+    tsort.filter_map {|node|
+      label, shape = @nodes[node]
+      if shape == @static_lib_shape
+        label.gsub(/\\n\([^)]+\)/, '')
+      else
+        nil
+      end
+    }.reverse.collect {|lib| "lib#{lib}.a"}
+  end
+
+  def to_s
+    libs.join(" ")
+  end
+
+  private
+
+  def dot_path
+    File.join(__dir__, "build", "whisper.cpp.dot")
+  end
+
+  def generate_dot
+    args = ["-S", "sources", "-B", "build", "--graphviz", dot_path, "-D", "BUILD_SHARED_LIBS=OFF"]
+    args << @options.to_s unless @options.to_s.empty?
+    system @cmake, *args, exception: true
+  end
+
+  def parse_dot
+    File.open(dot_path).each_line do |line|
+      case line
+      when /\[\s*label\s*=\s*"Static Library"\s*,\s*shape\s*=\s*(?<shape>\w+)\s*\]/
+        @static_lib_shape = $~[:shape]
+      when /\A\s*"(?<node>\w+)"\s*\[\s*label\s*=\s*"(?