diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fe86863893600..15e1133095213 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1305,6 +1305,81 @@ jobs: cd examples/llama.android ./gradlew build --no-daemon + android-ndk-build: + runs-on: ubuntu-latest + + env: + OPENCL_VERSION: 2025.07.22 + + strategy: + matrix: + include: + - build: 'arm64-cpu' + defines: '-D ANDROID_ABI=arm64-v8a -D ANDROID_PLATFORM=android-31 -D CMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -D GGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm -G Ninja -D LLAMA_CURL=OFF -D GGML_OPENMP=OFF' + - build: 'arm64-snapdragon' + defines: '--preset arm64-android-snapdragon-release' + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Install OpenCL Headers and Libs + id: install_opencl + if: ${{ matrix.build == 'arm64-snapdragon' }} + run: | + mkdir opencl + curl -L -o opencl/clhpp.tar.gz https://github.com/KhronosGroup/OpenCL-CLHPP/archive/refs/tags/v${OPENCL_VERSION}.tar.gz + curl -L -o opencl/headers.tar.gz https://github.com/KhronosGroup/OpenCL-Headers/archive/refs/tags/v${OPENCL_VERSION}.tar.gz + curl -L -o opencl/icd-loader.tar.gz https://github.com/KhronosGroup/OpenCL-ICD-Loader/archive/refs/tags/v${OPENCL_VERSION}.tar.gz + tar -xaf opencl/headers.tar.gz -C opencl + tar -xaf opencl/clhpp.tar.gz -C opencl + tar -xaf opencl/icd-loader.tar.gz -C opencl + sudo cp -r opencl/OpenCL-Headers-${OPENCL_VERSION}/CL ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include + sudo cp -r opencl/OpenCL-CLHPP-${OPENCL_VERSION}/include/CL/* ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include/CL + cd opencl/OpenCL-ICD-Loader-${OPENCL_VERSION} + cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DOPENCL_ICD_LOADER_HEADERS_DIR=${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=31 -DANDROID_STL=c++_shared + cmake --build build + sudo cp build/libOpenCL.so ${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android + rm -rf opencl + + - name: Install Hexagon SDK + id: install_hexsdk + if: ${{ matrix.build == 'arm64-snapdragon' }} + env: + HEXSDK_VER: 6.4.0.2 + HEXTLS_VER: 19.0.04 + run: | + curl -L -o hex-sdk.tar.gz https://github.com/snapdragon-toolchain/hexagon-sdk/releases/download/v$HEXSDK_VER/hexagon-sdk-v$HEXSDK_VER-amd64-lnx.tar.xz + mkdir hex-sdk + tar -xaf hex-sdk.tar.gz -C hex-sdk + ls -l hex-sdk + sudo mv hex-sdk /opt/hexagon + echo "HEXAGON_SDK_ROOT=/opt/hexagon/$HEXSDK_VER" >> "$GITHUB_ENV" + echo "HEXAGON_TOOLS_ROOT=/opt/hexagon/$HEXSDK_VER/tools/HEXAGON_Tools/$HEXTLS_VER" >> "$GITHUB_ENV" + echo "DEFAULT_HLOS_ARCH=64" >> "$GITHUB_ENV" + echo "DEFAULT_TOOLS_VARIANT=toolv19" >> "$GITHUB_ENV" + echo "DEFAULT_NO_QURT_INC=0" >> "$GITHUB_ENV" + echo "DEFAULT_DSP_ARCH=v73" >> "$GITHUB_ENV" + + - name: Update CMake presets + id: update_presets + if: ${{ matrix.build == 'arm64-snapdragon' }} + run: | + cp docs/backend/hexagon/CMakeUserPresets.json . 
+ + - name: Build + id: ndk_build + run: | + cmake ${{ matrix.defines }} -B build + cmake --build build + cmake --install build --prefix pkg-adb/llama.cpp + + - name: Test + id: cmake_test + run: | + echo "FIXME: test on devices" + openEuler-latest-cmake-cann: if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} defaults: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2ad381159409c..cab3ba9e68ee4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -134,6 +134,8 @@ jobs: include: - build: 'x64' os: ubuntu-22.04 + - build: 's390x-z15' # z15 because our CI runners are on z15 + os: ubuntu-22.04-s390x # GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm # - build: 'arm64' # os: ubuntu-22.04-arm diff --git a/.github/workflows/update-ops-docs.yml b/.github/workflows/update-ops-docs.yml index c0218fa742173..d5e264b34f496 100644 --- a/.github/workflows/update-ops-docs.yml +++ b/.github/workflows/update-ops-docs.yml @@ -3,10 +3,12 @@ name: Update Operations Documentation on: push: paths: + - 'docs/ops.md' - 'docs/ops/**' - 'scripts/create_ops_docs.py' pull_request: paths: + - 'docs/ops.md' - 'docs/ops/**' - 'scripts/create_ops_docs.py' diff --git a/CODEOWNERS b/CODEOWNERS index 3b696bf94a147..53d2e1e7ed49e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -55,7 +55,7 @@ /ggml/src/ggml-cuda/common.cuh @slaren /ggml/src/ggml-cuda/fattn* @JohannesGaessler /ggml/src/ggml-cuda/ggml-cuda.cu @slaren -/ggml/src/ggml-cuda/mmf.* @JohannesGaessler +/ggml/src/ggml-cuda/mmf.* @JohannesGaessler @am17an /ggml/src/ggml-cuda/mmq.* @JohannesGaessler /ggml/src/ggml-cuda/mmvf.* @JohannesGaessler /ggml/src/ggml-cuda/mmvq.* @JohannesGaessler @@ -65,6 +65,7 @@ /ggml/src/ggml-impl.h @ggerganov @slaren /ggml/src/ggml-metal/ @ggerganov /ggml/src/ggml-opencl/ @lhez @max-krasnyansky +/ggml/src/ggml-hexagon/ @max-krasnyansky /ggml/src/ggml-opt.cpp @JohannesGaessler /ggml/src/ggml-quants.* @ggerganov /ggml/src/ggml-rpc/ @rgerganov diff --git a/README.md b/README.md index 1c0742370de39..6d30a8bdab058 100644 --- a/README.md +++ b/README.md @@ -138,6 +138,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Ling models](https://huggingface.co/collections/inclusionAI/ling-67c51c85b34a7ea0aba94c32) - [x] [LFM2 models](https://huggingface.co/collections/LiquidAI/lfm2-686d721927015b2ad73eaa38) - [x] [Hunyuan models](https://huggingface.co/collections/tencent/hunyuan-dense-model-6890632cda26b19119c9c5e7) +- [x] [BailingMoeV2 (Ring/Ling 2.0) models](https://huggingface.co/collections/inclusionAI/ling-v2-68bf1dd2fc34c306c1fa6f86) #### Multimodal @@ -187,6 +188,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift) - Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama) - Delphi [Embarcadero/llama-cpp-delphi](https://github.com/Embarcadero/llama-cpp-delphi) +- Go (no CGo needed): [hybridgroup/yzma](https://github.com/hybridgroup/yzma) @@ -278,6 +280,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo | [IBM zDNN](docs/backend/zDNN.md) | IBM Z & LinuxONE | | [WebGPU [In Progress]](docs/build.md#webgpu) | All | | [RPC](https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc) | All | +| [Hexagon [In Progress]](docs/backend/hexagon/README.md) | Snapdragon | ## Obtaining and quantizing models 
diff --git a/ci/run.sh b/ci/run.sh index bf0d53f20af56..1a4806976aa9a 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -75,7 +75,7 @@ if [ ! -z ${GG_BUILD_ROCM} ]; then exit 1 fi - CMAKE_EXTRA="${CMAKE_EXTRA} -DAMDGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}" + CMAKE_EXTRA="${CMAKE_EXTRA} -DGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}" fi if [ ! -z ${GG_BUILD_SYCL} ]; then diff --git a/common/arg.cpp b/common/arg.cpp index c0b718071127d..a25743c899862 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1760,7 +1760,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP})); add_opt(common_arg( {"-t", "--threads"}, "N", - string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads), + string_format("number of CPU threads to use during generation (default: %d)", params.cpuparams.n_threads), [](common_params & params, int value) { params.cpuparams.n_threads = value; if (params.cpuparams.n_threads <= 0) { @@ -3435,7 +3435,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.use_jinja = true; } - ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA")); + ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_JINJA")); add_opt(common_arg( {"--reasoning-format"}, "FORMAT", "controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:\n" diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index db1f0b23dd7c2..dd9b51a9e50fd 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -41,9 +41,9 @@ static std::string build_repetition(const std::string & item_rule, int min_items return result; } -static void _build_min_max_int(int min_value, int max_value, std::stringstream & out, int decimals_left = 16, bool top_level = true) { - auto has_min = min_value != std::numeric_limits<int>::min(); - auto has_max = max_value != std::numeric_limits<int>::max(); +static void _build_min_max_int(int64_t min_value, int64_t max_value, std::stringstream & out, int decimals_left = 16, bool top_level = true) { + auto has_min = min_value != std::numeric_limits<int64_t>::min(); + auto has_max = max_value != std::numeric_limits<int64_t>::max(); auto digit_range = [&](char from, char to) { out << "["; @@ -159,7 +159,7 @@ static void _build_min_max_int(int min_value, int max_value, std::stringstream & if (has_min) { if (min_value < 0) { out << "\"-\" ("; - _build_min_max_int(std::numeric_limits<int>::min(), -min_value, out, decimals_left, /* top_level= */ false); + _build_min_max_int(std::numeric_limits<int64_t>::min(), -min_value, out, decimals_left, /* top_level= */ false); out << ") | [0] | [1-9] "; more_digits(0, decimals_left - 1); } else if (min_value == 0) { @@ -194,7 +194,7 @@ static void _build_min_max_int(int min_value, int max_value, std::stringstream & } digit_range(c, c); out << " ("; - _build_min_max_int(std::stoi(min_s.substr(1)), std::numeric_limits<int>::max(), out, less_decimals, /* top_level= */ false); + _build_min_max_int(std::stoll(min_s.substr(1)), std::numeric_limits<int64_t>::max(), out, less_decimals, /* top_level= */ false); out << ")"; if (c < '9') { out << " | "; @@ -216,7 +216,7 @@ static void _build_min_max_int(int min_value, int max_value, std::stringstream & _build_min_max_int(0, max_value, out, decimals_left, /* top_level= */ true); } else { out
<< "\"-\" ("; - _build_min_max_int(-max_value, std::numeric_limits<int>::max(), out, decimals_left, /* top_level= */ false); + _build_min_max_int(-max_value, std::numeric_limits<int64_t>::max(), out, decimals_left, /* top_level= */ false); out << ")"; } return; @@ -925,17 +925,17 @@ class SchemaConverter { int max_len = schema.contains("maxLength") ? schema["maxLength"].get<int>() : std::numeric_limits<int>::max(); return _add_rule(rule_name, "\"\\\"\" " + build_repetition(char_rule, min_len, max_len) + " \"\\\"\" space"); } else if (schema_type == "integer" && (schema.contains("minimum") || schema.contains("exclusiveMinimum") || schema.contains("maximum") || schema.contains("exclusiveMaximum"))) { - int min_value = std::numeric_limits<int>::min(); - int max_value = std::numeric_limits<int>::max(); + int64_t min_value = std::numeric_limits<int64_t>::min(); + int64_t max_value = std::numeric_limits<int64_t>::max(); if (schema.contains("minimum")) { - min_value = schema["minimum"].get<int>(); + min_value = schema["minimum"].get<int64_t>(); } else if (schema.contains("exclusiveMinimum")) { - min_value = schema["exclusiveMinimum"].get<int>() + 1; + min_value = schema["exclusiveMinimum"].get<int64_t>() + 1; } if (schema.contains("maximum")) { - max_value = schema["maximum"].get<int>(); + max_value = schema["maximum"].get<int64_t>(); } else if (schema.contains("exclusiveMaximum")) { - max_value = schema["exclusiveMaximum"].get<int>() - 1; + max_value = schema["exclusiveMaximum"].get<int64_t>() - 1; } std::stringstream out; out << "("; diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 8c5132193e0e0..05d791806df1e 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -29,12 +29,29 @@ sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf from gguf.vocab import MistralTokenizerType, MistralVocab -from mistral_common.tokens.tokenizers.base import TokenizerVersion -from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN, DATASET_STD -from mistral_common.tokens.tokenizers.tekken import Tekkenizer -from mistral_common.tokens.tokenizers.sentencepiece import ( - SentencePieceTokenizer, -) + +try: + from mistral_common.tokens.tokenizers.base import TokenizerVersion # pyright: ignore[reportMissingImports] + from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN as _MISTRAL_COMMON_DATASET_MEAN, DATASET_STD as _MISTRAL_COMMON_DATASET_STD # pyright: ignore[reportMissingImports] + from mistral_common.tokens.tokenizers.tekken import Tekkenizer # pyright: ignore[reportMissingImports] + from mistral_common.tokens.tokenizers.sentencepiece import ( # pyright: ignore[reportMissingImports] + SentencePieceTokenizer, + ) + + _mistral_common_installed = True + _mistral_import_error_msg = "" +except ImportError: + _MISTRAL_COMMON_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073) + _MISTRAL_COMMON_DATASET_STD = (0.26862954, 0.26130258, 0.27577711) + + _mistral_common_installed = False + TokenizerVersion = None + Tekkenizer = None + SentencePieceTokenizer = None + _mistral_import_error_msg = ( + "Mistral format requires `mistral-common` to be installed. Please run " + "`pip install mistral-common[image,audio]` to install it."
+ ) logger = logging.getLogger("hf-to-gguf") @@ -73,10 +90,8 @@ class ModelBase: use_temp_file: bool lazy: bool dry_run: bool - part_names: list[str] - is_safetensors: bool hparams: dict[str, Any] - tensor_names: set[str] | None + model_tensors: dict[str, Callable[[], Tensor]] gguf_writer: gguf.GGUFWriter model_name: str | None metadata_override: Path | None @@ -107,6 +122,9 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, type(self) is MmprojModel: raise TypeError(f"{type(self).__name__!r} should not be directly instantiated") + if self.is_mistral_format and not _mistral_common_installed: + raise ImportError(_mistral_import_error_msg) + self.dir_model = dir_model self.ftype = ftype self.fname_out = fname_out @@ -117,25 +135,8 @@ def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, self.dry_run = dry_run self.remote_hf_model_id = remote_hf_model_id self.sentence_transformers_dense_modules = sentence_transformers_dense_modules - if remote_hf_model_id is not None: - self.is_safetensors = True - - def get_remote_tensors() -> Iterator[tuple[str, Tensor]]: - logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}") - remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id) - self.tensor_names = set(name for name in remote_tensors.keys()) - for name, remote_tensor in remote_tensors.items(): - yield (name, LazyTorchTensor.from_remote_tensor(remote_tensor)) - - self.get_tensors = get_remote_tensors - else: - prefix = "model" if not self.is_mistral_format else "consolidated" - self.part_names = ModelBase.get_model_part_names(self.dir_model, prefix, ".safetensors") - self.is_safetensors = len(self.part_names) > 0 - if not self.is_safetensors: - self.part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin") self.hparams = ModelBase.load_hparams(self.dir_model, self.is_mistral_format) if hparams is None else hparams - self.tensor_names = None + self.model_tensors = self.index_tensors(remote_hf_model_id=remote_hf_model_id) self.metadata_override = metadata_override self.model_name = model_name self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py @@ -151,6 +152,8 @@ def get_remote_tensors() -> Iterator[tuple[str, Tensor]]: logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})") self.ftype = gguf.LlamaFileType.MOSTLY_BF16 + self.dequant_model() + # Configure GGUF Writer self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard) @@ -172,67 +175,215 @@ def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: return None raise KeyError(f"could not find any of: {keys}") - def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - tensor_names_from_parts: set[str] = set() + def index_tensors(self, remote_hf_model_id: str | None = None) -> dict[str, Callable[[], Tensor]]: + tensors: dict[str, Callable[[], Tensor]] = {} + + if remote_hf_model_id is not None: + is_safetensors = True + + logger.info(f"Using remote model with HuggingFace id: {remote_hf_model_id}") + remote_tensors = gguf.utility.SafetensorRemote.get_list_tensors_hf_model(remote_hf_model_id) + for name, remote_tensor in remote_tensors.items(): + tensors[name] = lambda r=remote_tensor: LazyTorchTensor.from_remote_tensor(r) + + return 
tensors + + prefix = "model" if not self.is_mistral_format else "consolidated" + part_names: list[str] = ModelBase.get_model_part_names(self.dir_model, prefix, ".safetensors") + is_safetensors: bool = len(part_names) > 0 + if not is_safetensors: + part_names = ModelBase.get_model_part_names(self.dir_model, "pytorch_model", ".bin") + + tensor_names_from_index: set[str] = set() if not self.is_mistral_format: - index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin" + index_name = "model.safetensors" if is_safetensors else "pytorch_model.bin" index_name += ".index.json" index_file = self.dir_model / index_name if index_file.is_file(): - self.tensor_names = set() logger.info(f"gguf: loading model weight map from '{index_name}'") with open(index_file, "r", encoding="utf-8") as f: index: dict[str, Any] = json.load(f) weight_map = index.get("weight_map") if weight_map is None or not isinstance(weight_map, dict): raise ValueError(f"Can't load 'weight_map' from {index_name!r}") - self.tensor_names.update(weight_map.keys()) + tensor_names_from_index.update(weight_map.keys()) else: - self.tensor_names = tensor_names_from_parts weight_map = {} else: - self.tensor_names = tensor_names_from_parts weight_map = {} - for part_name in self.part_names: - logger.info(f"gguf: loading model part '{part_name}'") + for part_name in part_names: + logger.info(f"gguf: indexing model part '{part_name}'") ctx: ContextManager[Any] - if self.is_safetensors: + if is_safetensors: from safetensors import safe_open ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu")) else: ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) with ctx as model_part: - tensor_names_from_parts.update(model_part.keys()) + assert model_part is not None for name in model_part.keys(): - if self.is_safetensors: + if is_safetensors: if self.lazy: data = model_part.get_slice(name) - data = LazyTorchTensor.from_safetensors_slice(data) + data_gen = lambda data=data: LazyTorchTensor.from_safetensors_slice(data) # noqa: E731 else: data = model_part.get_tensor(name) + data_gen = lambda data=data: data # noqa: E731 else: data = model_part[name] if self.lazy: - data = LazyTorchTensor.from_eager(data) - yield name, data + data_gen = lambda data=data: LazyTorchTensor.from_eager(data) # noqa: E731 + else: + data_gen = lambda data=data: data # noqa: E731 + tensors[name] = data_gen # verify tensor name presence and identify potentially missing files - if len(tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0: - missing = sorted(self.tensor_names.difference(tensor_names_from_parts)) - extra = sorted(tensor_names_from_parts.difference(self.tensor_names)) - missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map)) - if len(extra) == 0 and len(missing_files) > 0: - raise ValueError(f"Missing or incomplete model files: {missing_files}\n" - f"Missing tensors: {missing}") + if len(tensor_names_from_index) > 0: + tensor_names_from_parts = set(tensors.keys()) + if len(tensor_names_from_parts.symmetric_difference(tensor_names_from_index)) > 0: + missing = sorted(tensor_names_from_index.difference(tensor_names_from_parts)) + extra = sorted(tensor_names_from_parts.difference(tensor_names_from_index)) + missing_files = sorted(set(weight_map[n] for n in missing if n in weight_map)) + if len(extra) == 0 and len(missing_files) > 0: + raise ValueError(f"Missing or incomplete model files: 
{missing_files}\n" + f"Missing tensors: {missing}") + else: + raise ValueError("Mismatch between weight map and model parts for tensor names:\n" + f"Missing tensors: {missing}\n" + f"Extra tensors: {extra}") + + return tensors + + def dequant_model(self): + tensors_to_remove: list[str] = [] + new_tensors: dict[str, Callable[[], Tensor]] = {} + + if (quant_config := self.hparams.get("quantization_config")) and isinstance(quant_config, dict): + quant_method = quant_config.get("quant_method") + + def dequant_bitnet(weight: Tensor, scale: Tensor) -> Tensor: + weight = weight.view(torch.uint8) + orig_shape = weight.shape + + shift = torch.tensor([0, 2, 4, 6], dtype=torch.uint8).reshape((4, *(1 for _ in range(len(orig_shape))))) + data = weight.unsqueeze(0).expand((4, *orig_shape)) >> shift + data = data & 3 + data = (data.float() - 1).reshape((orig_shape[0] * 4, *orig_shape[1:])) + + # The scale is inverted + return data / scale.float() + + def dequant_simple(weight: Tensor, scale: Tensor) -> Tensor: + scale = scale.float() + + if (weight_block_size := quant_config.get("weight_block_size")): + # TODO: make sure it's a list of integers + for i, size in enumerate(weight_block_size): + scale = scale.repeat_interleave(size, i) + # unpad the scale (e.g. when the tensor size isn't a multiple of the block size) + scale = scale[tuple(slice(0, size) for size in weight.shape)] + + return weight.float() * scale + + # ref: https://github.com/ModelCloud/GPTQModel/blob/037c5c0f6c9e33c500d975b038d02e7ca437546d/gptqmodel/nn_modules/qlinear/__init__.py#L437-L476 + def dequant_gptq(g_idx: Tensor, qweight: Tensor, qzeros: Tensor, scales: Tensor) -> Tensor: + bits = quant_config["bits"] + assert bits in (2, 3, 4, 8) + assert qweight.dtype == qzeros.dtype + maxq = (2 ** bits) - 1 + weight = None + zeros = None + pack_dtype_bits = qweight.dtype.itemsize * 8 + + if bits in [2, 4, 8]: + pack_factor = pack_dtype_bits // bits + wf = torch.tensor(list(range(0, pack_dtype_bits, bits)), dtype=torch.int32).unsqueeze(0) + if self.lazy: + wf = LazyTorchTensor.from_eager(wf) + + zeros = torch.bitwise_right_shift( + qzeros.unsqueeze(2).expand(-1, -1, pack_factor), + wf.unsqueeze(0) + ).to(torch.int16 if bits == 8 else torch.int8) + zeros = torch.bitwise_and(zeros, maxq).reshape(scales.shape) + + weight = torch.bitwise_and( + torch.bitwise_right_shift( + qweight.unsqueeze(1).expand(-1, pack_factor, -1), + wf.unsqueeze(-1) + ).to(torch.int16 if bits == 8 else torch.int8), + maxq + ) + elif bits == 3: + raise NotImplementedError("3-bit gptq dequantization is not yet implemented") + + assert weight is not None + assert zeros is not None + + weight = weight.reshape(weight.shape[0] * weight.shape[1], weight.shape[2]) + + # gptq_v2 doesn't need to offset zeros + if quant_config.get("checkpoint_format", "gptq") == "gptq": + zeros += 1 + + return (scales[g_idx].float() * (weight - zeros[g_idx]).float()).T + + if quant_method == "bitnet": + for name in self.model_tensors.keys(): + if name.endswith(".weight_scale"): + weight_name = name.removesuffix("_scale") + w = self.model_tensors[weight_name] + s = self.model_tensors[name] + self.model_tensors[weight_name] = lambda w=w, s=s: dequant_bitnet(w(), s()) + tensors_to_remove.append(name) + elif quant_method == "fp8": + for name in self.model_tensors.keys(): + if name.endswith(".weight_scale_inv"): + weight_name = name.removesuffix("_scale_inv") + w = self.model_tensors[weight_name] + s = self.model_tensors[name] + self.model_tensors[weight_name] = lambda w=w, s=s: dequant_simple(w(), s()) + 
tensors_to_remove.append(name) + elif quant_method == "gptq": + for name in self.model_tensors.keys(): + if name.endswith(".qweight"): + base_name = name.removesuffix(".qweight") + g_idx = self.model_tensors[base_name + ".g_idx"] + qweight = self.model_tensors[base_name + ".qweight"] + qzeros = self.model_tensors[base_name + ".qzeros"] + scales = self.model_tensors[base_name + ".scales"] + new_tensors[base_name + ".weight"] = ( + lambda g=g_idx, z=qzeros, w=qweight, s=scales: dequant_gptq( + g(), w(), z(), s() + ) + ) + tensors_to_remove += [ + base_name + n + for n in ( + ".g_idx", + ".qzeros", + ".qweight", + ".scales", + ) + ] else: - raise ValueError("Mismatch between weight map and model parts for tensor names:\n" - f"Missing tensors: {missing}\n" - f"Extra tensors: {extra}") + raise NotImplementedError(f"Quant method is not yet supported: {quant_method!r}") + + for name in tensors_to_remove: + if name in self.model_tensors: + del self.model_tensors[name] + + for name, value in new_tensors.items(): + self.model_tensors[name] = value + + def get_tensors(self) -> Iterator[tuple[str, Tensor]]: + for name, gen in self.model_tensors.items(): + yield name, gen() def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: if key not in gguf.MODEL_TENSORS[self.model_arch]: @@ -892,8 +1043,8 @@ def get_vocab_base_pre(self, tokenizer) -> str: # ref: https://huggingface.co/JetBrains/Mellum-4b-base res = "mellum" if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206": - # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base - res = "llada-moe" + # ref: https://huggingface.co/inclusionAI/Ling-mini-base-2.0 + res = "bailingmoe2" if chkhsh == "53e325976a6e142379c19b09afcae354f2f496f147afa8f9e189a33fe4e3024e": # ref: https://huggingface.co/ibm-granite/granite-docling-258M res = "granite-docling" @@ -1346,6 +1497,17 @@ def get_audio_config(self) -> dict[str, Any] | None: def set_type(self): self.gguf_writer.add_type(gguf.GGUFType.MMPROJ) + def prepare_metadata(self, vocab_only: bool): + super().prepare_metadata(vocab_only=vocab_only) + + output_type: str = self.ftype.name.partition("_")[2] + + if self.fname_out.is_dir(): + fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=output_type, model_type=None) + self.fname_out = self.fname_out / f"mmproj-{fname_default}.gguf" + else: + self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type) + def set_gguf_parameters(self): self.gguf_writer.add_file_type(self.ftype) @@ -1363,8 +1525,8 @@ def set_gguf_parameters(self): self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"])) # preprocessor config - image_mean = DATASET_MEAN if self.is_mistral_format else self.preprocessor_config["image_mean"] - image_std = DATASET_STD if self.is_mistral_format else self.preprocessor_config["image_std"] + image_mean = _MISTRAL_COMMON_DATASET_MEAN if self.is_mistral_format else self.preprocessor_config["image_mean"] + image_std = _MISTRAL_COMMON_DATASET_STD if self.is_mistral_format else self.preprocessor_config["image_std"] self.gguf_writer.add_vision_image_mean(image_mean) self.gguf_writer.add_vision_image_std(image_std) @@ -2033,6 +2195,9 @@ def __init__(self, *args, **kwargs): self.hparams["num_attention_heads"] = self.hparams.get("num_attention_heads", 32) def _set_vocab_mistral(self): + if not 
_mistral_common_installed: + raise ImportError(_mistral_import_error_msg) + vocab = MistralVocab(self.dir_model) logger.info( f"Converting tokenizer {vocab.tokenizer_type} of size {vocab.vocab_size}." @@ -4358,27 +4523,6 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(1.0) - _has_tok_embd = False - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) - tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - - new_name = self.map_tensor_name(name) - - # assuming token_embd.weight is seen before output.weight - if not self._has_tok_embd and new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT): - # even though the tensor file(s) does not contain the word embeddings they are still in the weight map - if self.tensor_names and "transformer.wte.weight" in self.tensor_names: - logger.debug(f"{tok_embd_name} not found before {output_name}, assuming they are tied") - self.tensor_names.remove("transformer.wte.weight") - elif new_name == tok_embd_name: - self._has_tok_embd = True - - return [(new_name, data_torch)] - @ModelBase.register("InternLM2ForCausalLM") class InternLM2Model(TextModel): @@ -8055,6 +8199,103 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed experts: {experts}") +@ModelBase.register("BailingMoeV2ForCausalLM") +class BailingMoeV2Model(TextModel): + model_arch = gguf.MODEL_ARCH.BAILINGMOE2 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if nextn_layers := self.hparams.get("num_nextn_predict_layers", 0): + self.block_count = self.hparams["num_hidden_layers"] + nextn_layers + self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) + + def set_vocab(self): + self._set_vocab_gpt2() + + def set_gguf_parameters(self): + super().set_gguf_parameters() + hparams = self.hparams + if (rope_dim := hparams.get("head_dim")) is None: + rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] + + self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))) + rope_scaling = self.hparams.get("rope_scaling") or {} + if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) + self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) + self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) + else: + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) + self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) + self.gguf_writer.add_vocab_size(hparams["vocab_size"]) + self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) + self.gguf_writer.add_expert_shared_feed_forward_length(hparams.get("moe_shared_expert_intermediate_size", hparams["moe_intermediate_size"] * hparams["num_shared_experts"])) + self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"]) + self.gguf_writer.add_expert_count(hparams["num_experts"]) + self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"]) + self.gguf_writer.add_expert_group_count(hparams["n_group"]) + self.gguf_writer.add_expert_group_used_count(hparams["topk_group"]) + self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"]) + + if 
hparams["score_function"] == "sigmoid": + self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID) + elif hparams["score_function"] == "softmax": + self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX) + else: + raise ValueError(f"Unsupported score_function value: {hparams['score_function']}") + + if (nextn_layers := self.hparams.get("num_nextn_predict_layers")) is not None: + self.gguf_writer.add_nextn_predict_layers(nextn_layers) + + _experts: list[dict[str, Tensor]] | None = None + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if "mlp.experts" in name: + n_experts = self.hparams["num_experts"] + assert bid is not None + + tensors: list[tuple[str, Tensor]] = [] + + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] + + self._experts[bid][name] = data_torch + + if len(self._experts[bid]) >= n_experts * 3: + # merge the experts into a single 3d tensor + for w_name in ["down_proj", "gate_proj", "up_proj"]: + datas: list[Tensor] = [] + + for xid in range(n_experts): + ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] + + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + + return tensors + + if name.endswith(".expert_bias"): + name = name.replace(".expert_bias", ".expert_bias.bias") + + return [(self.map_tensor_name(name), data_torch)] + + def prepare_tensors(self): + super().prepare_tensors() + + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") + + @ModelBase.register("GroveMoeForCausalLM", "modeling_grove_moe.GroveMoeForCausalLM") class GroveMoeModel(TextModel): model_arch = gguf.MODEL_ARCH.GROVEMOE @@ -8713,6 +8954,13 @@ def set_vocab(self): class GptOssModel(TextModel): model_arch = gguf.MODEL_ARCH.GPT_OSS + # TODO: remove once MXFP4 is supported more generally + def dequant_model(self): + quant_config = self.hparams.get("quantization_config") + if quant_config is not None and quant_config.get("quant_method") == "mxfp4": + return + return super().dequant_model() + def transform_nibble_layout(self, tensor): assert tensor.dtype == torch.uint8 assert tensor.shape[-1] == 16 @@ -9115,7 +9363,7 @@ class MistralModel(LlamaModel): @staticmethod def get_community_chat_template(vocab: MistralVocab, templates_dir: Path, is_mistral_format: bool): - assert TokenizerVersion is not None, "mistral_common is not installed" + assert TokenizerVersion is not None and Tekkenizer is not None and SentencePieceTokenizer is not None, _mistral_import_error_msg assert isinstance(vocab.tokenizer, (Tekkenizer, SentencePieceTokenizer)), ( f"Expected Tekkenizer or SentencePieceTokenizer, got {type(vocab.tokenizer)}" ) @@ -9492,11 +9740,9 @@ def main() -> None: logger.info(f"Loading model: {dir_model.name}") - if args.mmproj: - if "mmproj" not in fname_out.name: - fname_out = ModelBase.add_prefix_to_filename(fname_out, "mmproj-") - is_mistral_format = args.mistral_format + if is_mistral_format and not _mistral_common_installed: + raise ImportError(_mistral_import_error_msg) disable_mistral_community_chat_template = args.disable_mistral_community_chat_template with torch.inference_mode(): 
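Note on the convert_hf_to_gguf.py refactor above: the eager `get_tensors()` iterator is replaced with an index of `name -> Callable[[], Tensor]` thunks (`model_tensors`), so that `dequant_model()` can wrap existing entries with dequantization without materializing anything early. Below is a minimal, self-contained sketch of that pattern, with hypothetical tensor names and a simplified per-element scale rather than the converter's real block-wise logic; note the `lambda w=w, s=s:` default-argument binding, which the diff uses to avoid Python's late-binding closure pitfall inside loops.

```python
from typing import Callable

import torch
from torch import Tensor

# Index of tensor thunks, as index_tensors() would build it (names are made up).
model_tensors: dict[str, Callable[[], Tensor]] = {
    "blk.0.ffn_up.weight": lambda: torch.randn(8, 4),
    "blk.0.ffn_up.weight_scale_inv": lambda: torch.full((8, 4), 2.0),
}


def dequant_simple(weight: Tensor, scale: Tensor) -> Tensor:
    # Simplified: per-element scale; the real code expands block-wise scales first.
    return weight.float() * scale.float()


# Wrap each weight thunk with a dequant thunk; nothing is computed yet.
to_remove: list[str] = []
for name in list(model_tensors.keys()):
    if name.endswith(".weight_scale_inv"):
        weight_name = name.removesuffix("_scale_inv")
        w = model_tensors[weight_name]
        s = model_tensors[name]
        # Default arguments capture the current w/s callables (avoids late binding).
        model_tensors[weight_name] = lambda w=w, s=s: dequant_simple(w(), s())
        to_remove.append(name)

for name in to_remove:
    del model_tensors[name]

# Tensors materialize only when the thunks are finally called, e.g. while writing the GGUF.
for name, gen in model_tensors.items():
    print(name, tuple(gen().shape))
```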
diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 28002f766e23b..0ebc1b160f603 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -139,7 +139,7 @@ class TOKENIZER_TYPE(IntEnum): {"name": "lfm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LiquidAI/LFM2-Tokenizer"}, {"name": "exaone4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B", }, {"name": "mellum", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/JetBrains/Mellum-4b-base", }, - {"name": "llada-moe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base", }, + {"name": "bailingmoe2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-mini-base-2.0", }, {"name": "granite-docling", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ibm-granite/granite-docling-258M", }, ] diff --git a/docs/backend/hexagon/CMakeUserPresets.json b/docs/backend/hexagon/CMakeUserPresets.json new file mode 100644 index 0000000000000..e0b19db0f5a23 --- /dev/null +++ b/docs/backend/hexagon/CMakeUserPresets.json @@ -0,0 +1,49 @@ +{ + "version": 4, + "configurePresets": [ + { + "name": "arm64-android-snapdragon", + "hidden": true, + "architecture": { "value": "arm64", "strategy": "external" }, + "toolset": { "value": "host=x86_64", "strategy": "external" }, + "cacheVariables": { + "ANDROID_ABI": "arm64-v8a", + "ANDROID_PLATFORM": "android-31", + "CMAKE_TOOLCHAIN_FILE": "$env{ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake", + "CMAKE_C_FLAGS": "-march=armv8.7a+fp16 -fvectorize -ffp-model=fast -fno-finite-math-only -flto -D_GNU_SOURCE", + "CMAKE_CXX_FLAGS": "-march=armv8.7a+fp16 -fvectorize -ffp-model=fast -fno-finite-math-only -flto -D_GNU_SOURCE", + "CMAKE_C_FLAGS_RELEASE": "-O3 -DNDEBUG", + "CMAKE_CXX_FLAGS_RELEASE": "-O3 -DNDEBUG", + "CMAKE_C_FLAGS_RELWITHDEBINFO": "-O3 -DNDEBUG -g", + "CMAKE_CXX_FLAGS_RELWITHDEBINFO": "-O3 -DNDEBUG -g", + "HEXAGON_SDK_ROOT": "$env{HEXAGON_SDK_ROOT}", + "PREBUILT_LIB_DIR": "android_aarch64", + "GGML_OPENMP": "OFF", + "GGML_LLAMAFILE": "OFF", + "GGML_OPENCL": "ON", + "GGML_HEXAGON": "ON", + "LLAMA_CURL": "OFF" + } + }, + + { + "name": "arm64-windows-snapdragon", + "inherits": [ "base", "arm64-windows-llvm" ], + "cacheVariables": { + "HEXAGON_SDK_ROOT": "$env{HEXAGON_SDK_ROOT}", + "PREBUILT_LIB_DIR": "windows_aarch64", + "GGML_OPENMP": "OFF", + "GGML_LLAMAFILE": "OFF", + "GGML_OPENCL": "ON", + "GGML_HEXAGON": "ON", + "LLAMA_CURL": "OFF" + } + }, + + { "name": "arm64-android-snapdragon-debug" , "inherits": [ "base", "arm64-android-snapdragon", "debug" ] }, + { "name": "arm64-android-snapdragon-release", "inherits": [ "base", "arm64-android-snapdragon", "release" ] }, + + { "name": "arm64-windows-snapdragon-debug" , "inherits": [ "base", "arm64-windows-snapdragon", "debug" ] }, + { "name": "arm64-windows-snapdragon-release", "inherits": [ "base", "arm64-windows-snapdragon", "release" ] } + ] +} diff --git a/docs/backend/hexagon/README.md b/docs/backend/hexagon/README.md new file mode 100644 index 0000000000000..85f136ef9eef0 --- /dev/null +++ b/docs/backend/hexagon/README.md @@ -0,0 +1,239 @@ +# Snapdragon-based Android devices + +## How to Build + +The easiest way to build llama.cpp for a Snapdragon-based Android device is using the toolchain Docker image (see github.com/snapdragon-toolchain). +This image includes Android NDK, OpenCL SDK, Hexagon SDK, CMake, etc. 
+ +This method works on Linux, macOS, and Windows. macOS and Windows users should install Docker Desktop. + +``` +~/src/llama.cpp$ docker run -it -u $(id -u):$(id -g) --volume $(pwd):/workspace --platform linux/amd64 ghcr.io/snapdragon-toolchain/arm64-android:v0.3 +[d]/> cd /workspace +``` + +The rest of the Android build process assumes that you're running inside the toolchain container. +Let's build llama.cpp with CPU, OpenCL, and Hexagon backends via CMake presets: + +``` +[d]/workspace> cp docs/backend/hexagon/CMakeUserPresets.json . + +[d]/workspace> cmake --preset arm64-android-snapdragon-release -B build-snapdragon +Preset CMake variables: + ANDROID_ABI="arm64-v8a" + ... + CMAKE_TOOLCHAIN_FILE="/opt/android-ndk-r28b/build/cmake/android.toolchain.cmake" + GGML_HEXAGON="ON" + GGML_OPENCL="ON" + GGML_OPENMP="OFF" + HEXAGON_SDK_ROOT="/opt/hexagon/6.4.0.2" +... +-- Including OpenCL backend +-- Including Hexagon backend +... +-- Build files have been written to: /workspace/build-snapdragon + +[d]/workspace> cmake --build build-snapdragon +... +[144/356] Performing build step for 'htp-v73' +[1/16] Generating htp_iface_skel.c, htp_iface_stub.c, htp_iface.h +[2/16] Building C object CMakeFiles/ggml-htp-v73.dir/hvx-sigmoid.c.obj +[3/16] Building C object CMakeFiles/ggml-htp-v73.dir/htp-dma.c.obj +[4/16] Building C object CMakeFiles/ggml-htp-v73.dir/worker-pool.c.obj +... +-- Installing: /workspace/build-snapdragon/ggml/src/ggml-hexagon/libggml-htp-v73.so +-- Installing: /workspace/build-snapdragon/ggml/src/ggml-hexagon/libggml-htp-v75.so +... +``` + +To generate an installable "package", simply use `cmake --install`: + +``` +[d]/workspace> cmake --install build-snapdragon --prefix pkg-adb/llama.cpp +-- Install configuration: "Release" +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-cpu.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-opencl.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-hexagon.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-htp-v73.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-htp-v75.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-htp-v79.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml-htp-v81.so +-- Installing: /workspace/pkg-adb/llama.cpp/lib/libggml.so +... +-- Installing: /workspace/pkg-adb/llama.cpp/bin/llama-bench +-- Installing: /workspace/pkg-adb/llama.cpp/bin/llama-cli +... +``` + +## How to Install + +For this step, your device needs to be configured for on-device development. +Please see https://developer.android.com/studio/debug/dev-options for details. + +Once ADB is enabled, use `adb push` to install the `pkg-adb/llama.cpp` package on the device. +**Note that the toolchain Docker image doesn't have ADB and doesn't set up the ADB bridge. Please use native ADB on the host.** + +``` +~/src/llama.cpp$ adb push pkg-adb/llama.cpp /data/local/tmp/ +pkg-adb/llama.cpp/bin/: 67 files pushed, 0 skipped. 190.2 MB/s (919095042 bytes in 4.607s) +pkg-adb/llama.cpp/include/: 19 files pushed, 0 skipped. 20.5 MB/s (255173 bytes in 0.012s) +pkg-adb/llama.cpp/lib/: 16 files pushed, 0 skipped. 144.4 MB/s (43801382 bytes in 0.289s) +102 files pushed, 0 skipped. 186.9 MB/s (963151597 bytes in 4.914s) +``` + +At this point, you should also install some models: + +``` +~/src/llama.cpp$ wget https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf +...
+2025-10-11 12:04:52 (10.7 MB/s) - ‘Llama-3.2-1B-Instruct-Q4_0.gguf’ saved [773025920/773025920] + +~/src/llama.cpp$ adb push Llama-3.2-1B-Instruct-Q4_0.gguf /data/local/tmp/gguf +Llama-3.2-1B-Instruct-Q4_0.gguf: 1 file pushed, 0 skipped. 38.3 MB/s (773025920 bytes in 19.250s) +``` + +## How to Run + +The easiest way to run llama.cpp cli tools is using provided wrapper scripts that properly set up all required environment variables. + +llama.cpp supports three backends on Snapdragon-based devices: CPU, Adreno GPU (GPUOpenCL), and Hexagon NPU (HTP0-4). +You can select which backend to run the model on using the `D=` variable, which maps to the `--device` option. + +Hexagon NPU behaves as a "GPU" device when it comes to `-ngl` and other offload-related options. + +Here are some examples of running various llama.cpp tools via ADB. + +Simple question for Llama-3.2-1B + +``` +~/src/llama.cpp$ M=Llama-3.2-1B-Instruct-Q4_0.gguf D=HTP0 ./scripts/snapdragon/adb/run-cli.sh -no-cnv -p "what is the most popular cookie in the world?" +... +ggml-hex: Hexagon backend (experimental) : allocating new registry : ndev 1 +ggml-hex: Hexagon Arch version v79 +ggml-hex: allocating new session: HTP0 +ggml-hex: new session: HTP0 : session-id 0 domain-id 3 uri file:///libggml-htp-v79.so?htp_iface_skel_handle_invoke&_modver=1.0&_dom=cdsp&_session=0 handle 0xb4000072c7955e50 +... +load_tensors: offloading output layer to GPU +load_tensors: offloaded 17/17 layers to GPU +load_tensors: CPU model buffer size = 225.49 MiB +load_tensors: HTP0 model buffer size = 0.26 MiB +load_tensors: HTP0-REPACK model buffer size = 504.00 MiB +... +I hope this helps you understand the world's most popular cookies! [end of text] +... +llama_perf_sampler_print: sampling time = 30.08 ms / 487 runs ( 0.06 ms per token, 16191.77 tokens per second) +llama_perf_context_print: load time = 617.94 ms +llama_perf_context_print: prompt eval time = 80.76 ms / 11 tokens ( 7.34 ms per token, 136.21 tokens per second) +llama_perf_context_print: eval time = 9210.59 ms / 475 runs ( 19.39 ms per token, 51.57 tokens per second) +llama_perf_context_print: total time = 9454.92 ms / 486 tokens +llama_perf_context_print: graphs reused = 473 +llama_memory_breakdown_print: | memory breakdown [MiB] | total free self model context compute unaccounted | +llama_memory_breakdown_print: | - HTP0 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - Host | 439 = 225 + 136 + 77 | +llama_memory_breakdown_print: | - HTP0-REPACK | 504 = 504 + 0 + 0 | +``` + +Summary request for OLMoE-1B-7B. This is a large model that requires two HTP sessions/devices + +``` +~/src/llama.cpp$ M=OLMoE-1B-7B-0125-Instruct-Q4_0.gguf NDEV=2 D=HTP0,HTP1 ./scripts/snapdragon/adb/run-cli.sh -f surfing.txt -no-cnv +... +ggml-hex: Hexagon backend (experimental) : allocating new registry : ndev 1 +ggml-hex: Hexagon Arch version v81 +ggml-hex: allocating new session: HTP0 +ggml-hex: allocating new session: HTP1 +... +load_tensors: offloading output layer to GPU +load_tensors: offloaded 17/17 layers to GPU +load_tensors: CPU model buffer size = 143.86 MiB +load_tensors: HTP1 model buffer size = 0.23 MiB +load_tensors: HTP1-REPACK model buffer size = 1575.00 MiB +load_tensors: HTP0 model buffer size = 0.28 MiB +load_tensors: HTP0-REPACK model buffer size = 2025.00 MiB +... 
+llama_context: CPU output buffer size = 0.19 MiB +llama_kv_cache: HTP1 KV buffer size = 238.00 MiB +llama_kv_cache: HTP0 KV buffer size = 306.00 MiB +llama_kv_cache: size = 544.00 MiB ( 8192 cells, 16 layers, 1/1 seqs), K (q8_0): 272.00 MiB, V (q8_0): 272.00 MiB +llama_context: HTP0 compute buffer size = 15.00 MiB +llama_context: HTP1 compute buffer size = 15.00 MiB +llama_context: CPU compute buffer size = 24.56 MiB +... +llama_perf_context_print: prompt eval time = 1730.57 ms / 212 tokens ( 8.16 ms per token, 122.50 tokens per second) +llama_perf_context_print: eval time = 5624.75 ms / 257 runs ( 21.89 ms per token, 45.69 tokens per second) +llama_perf_context_print: total time = 7377.33 ms / 469 tokens +llama_perf_context_print: graphs reused = 255 +llama_memory_breakdown_print: | memory breakdown [MiB] | total free self model context compute unaccounted | +llama_memory_breakdown_print: | - HTP0 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - HTP1 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - Host | 742 = 144 + 544 + 54 | +llama_memory_breakdown_print: | - HTP1-REPACK | 1575 = 1575 + 0 + 0 | +llama_memory_breakdown_print: | - HTP0-REPACK | 2025 = 2025 + 0 + 0 | +``` + +Op test for MUL_MAT + +``` +~/src/llama.cpp$ HB=0 ./scripts/snapdragon/adb/run-tool.sh test-backend-ops -b HTP0 -o MUL_MAT +... +Backend 2/3: HTP0 +Device description: Hexagon +Device memory: 2048 MB (2048 MB free) +MUL_MAT(type_a=q4_0,type_b=f32,m=16,n=1,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],v=0,o=1): OK +MUL_MAT(type_a=q4_0,type_b=f32,m=16,n=2,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],v=0,o=1): OK +MUL_MAT(type_a=q4_0,type_b=f32,m=16,n=3,k=256,bs=[1,1],nr=[1,1],per=[0,1,2,3],v=0,o=1): OK + +~/src/llama.cpp-hexagon$ M=Llama-3.2-1B-Instruct-Q4_0.gguf ./scripts/snapdragon/adb/run-bench.sh -p 128 -n 64 +... +ggml-hex: Hexagon backend (experimental) : allocating new registry : ndev 1 +ggml-hex: Hexagon Arch version v79 +ggml-hex: allocating new session: HTP0 +ggml-hex: new session: HTP0 : session-id 0 domain-id 3 uri file:///libggml-htp-v79.so?htp_iface_skel_handle_invoke&_modver=1.0&_dom=cdsp&_session=0 handle 0xb400007d4b231090 +| model | size | params | backend | ngl | threads | n_batch | mmap | test | t/s | +| ---------------| ---------: | -----: | ---------- | --: | ------: | ------: | ---: | ----: | ------------: | +| llama 1B Q4_0 | 729.75 MiB | 1.24 B | HTP | 99 | 4 | 128 | 0 | pp128 | 169.42 ± 1.75 | +| llama 1B Q4_0 | 729.75 MiB | 1.24 B | HTP | 99 | 4 | 128 | 0 | tg64 | 51.54 ± 1.13 | + +build: 6a8cf8914 (6733) +``` + +## Environment variables + +- `GGML_HEXAGON_NDEV=1` + Controls the number of devices/sessions to allocate. The default is 1. + Most quantized models under 4B fit into a single session; an 8B model needs two, and a 20B model needs four. + +- `GGML_HEXAGON_NHVX=0` + Controls the number of HVX hardware threads to use. The default is all (actual number varies depending on the hardware version). + +- `GGML_HEXAGON_HOSTBUF=1` + Controls whether the Hexagon backend allocates host buffers. By default, all buffers except for REPACK are host buffers. + This option is required for testing Ops that require REPACK buffers (MUL_MAT and MUL_MAT_ID). + +- `GGML_HEXAGON_VERBOSE=1` + Enables verbose logging of Ops from the backend. 
Example output: + + ``` + ggml-hex: HTP0 graph-compute n_nodes 2 + ggml-hex: HTP0 matmul : blk.27.ffn_up.weight x ffn_norm-27 -> ffn_up-27 : 3072:8192 x 3072:1 -> 8192:1 : q4_0 x f32 -> f32 : HTP0 x HTP0 -> HTP0 : flags 0x1 + ggml-hex: HTP0 matmul : blk.27.ffn_gate.weight x ffn_norm-27 -> ffn_gate-27 : 3072:8192 x 3072:1 -> 8192:1 : q4_0 x f32 -> f32 : HTP0 x HTP0 -> HTP0 : flags 0x3 + ggml-hex: HTP0 graph-compute n_nodes 1 + ggml-hex: HTP0 matmul : blk.27.ffn_down.weight x ffn_gate_par-27 -> ffn_out-27 : 8192:3072 x 8192:1 -> 3072:1 : q4_0 x f32 -> f32 : HTP0 x HTP0 -> HTP0 : flags 0x0 + ggml-hex: HTP0 get-tensor result_output : data 0x7592487000 offset 0 size 513024 + ``` + +- `GGML_HEXAGON_PROFILE=1` + Generates a host-side profile for the ggml-hexagon Ops. + +- `GGML_HEXAGON_OPMASK=0x0` + Allows enabling specific stages of the processing pipeline: + + - `0x1` Enable Op Queue (i.e., queuing Ops into NPU) + - `0x2` Enable Dynamic Quantizer (if needed for the Op) + - `0x4` Enable Op Compute (MUL_MAT, etc.) + + Examples: + + `GGML_HEXAGON_OPMASK=0x1 llama-cli ...` - Ops are enqueued but NPU-side processing is stubbed out + `GGML_HEXAGON_OPMASK=0x3 llama-cli ...` - NPU performs dynamic quantization and skips the rest + `GGML_HEXAGON_OPMASK=0x7 llama-cli ...` - Full queuing and processing of Ops (default) diff --git a/docs/backend/hexagon/developer.md b/docs/backend/hexagon/developer.md new file mode 100644 index 0000000000000..200a7aabc0160 --- /dev/null +++ b/docs/backend/hexagon/developer.md @@ -0,0 +1,109 @@ +# Hexagon backend developer details + +## Backend libraries + +The Hexagon backend consists of two parts: + + - `libggml-hexagon` + This is the regular CPU-side GGML backend library, either shared or statically linked + + - `libggml-htp-vNN` + This is the NPU-side (HTP stands for Hexagon Tensor Processor) shared library that contains the Op dispatcher and kernels. + The correct library is selected automatically at runtime based on the HW version. + +Here is an example of the build artifacts: + +``` +~/src/llama.cpp$ ls -l pkg-adb/llama.cpp/lib/libggml* +pkg-adb/llama.cpp/lib/libggml-base.so +pkg-adb/llama.cpp/lib/libggml-cpu.so +pkg-adb/llama.cpp/lib/libggml-hexagon.so <<< CPU library +pkg-adb/llama.cpp/lib/libggml-htp-v73.so <<< HTP op/kernels for Hexagon v73 +pkg-adb/llama.cpp/lib/libggml-htp-v75.so +pkg-adb/llama.cpp/lib/libggml-htp-v79.so +pkg-adb/llama.cpp/lib/libggml-htp-v81.so +``` + +## Memory buffers + +The Hexagon NPU backend takes advantage of Snapdragon's unified memory model, where all buffers are fully accessible by the CPU and GPU. +The NPU does have a dedicated tightly-coupled memory called VTCM, but that memory is used only for intermediate data (e.g. dynamically +quantized tensors) or temporary data (chunks of the weight tensors fetched via DMA). +... +Please note that currently the Hexagon backend does not implement SET/GET_ROWS Ops because there is no advantage in offloading those +to the NPU at this point. + +The backend does allocate non-host buffers for tensors with data types that require repacking: Q4_0, Q8_0, MXFP4. +From the MMU perspective these buffers are still regular buffers (normal access by the CPU); they are marked as non-host simply to force +the repacking. + +## Large model handling + +A Hexagon NPU session (aka Process Domain (PD) in the Hexagon docs) is limited to a memory mapping of around 3.5GB. +In llama.cpp/GGML each Hexagon session is mapped to a single GGML backend device (HTP0, HTP1, etc).
+ +In order to map models larger than 3.5GB we need to allocate multiple devices and split the model. +For this we're taking advantage of the llama.cpp/GGML multi-GPU layer-splitting support. +Each Hexagon device behaves like a GPU from the offload and model splitting perspective. + +Here is an example of running GPT-OSS-20B model on a newer Snapdragon device with 16GB of DDR. + +``` +M=gpt-oss-20b-Q4_0.gguf NDEV=4 D=HTP0,HTP1,HTP2,HTP3 P=surfing.txt scripts/snapdragon/adb/run-cli.sh -no-cnv -f surfing.txt -n 32 +... +LD_LIBRARY_PATH=/data/local/tmp/llama.cpp/lib +ADSP_LIBRARY_PATH=/data/local/tmp/llama.cpp/lib +GGML_HEXAGON_NDEV=4 ./bin/llama-cli --no-mmap -m /data/local/tmp/llama.cpp/../gguf/gpt-oss-20b-Q4_0.gguf + -t 4 --ctx-size 8192 --batch-size 128 -ctk q8_0 -ctv q8_0 -fa on -ngl 99 --device HTP0,HTP1,HTP2,HTP3 -no-cnv -f surfing.txt +... +llama_model_loader: - type f32: 289 tensors +llama_model_loader: - type q4_0: 96 tensors +llama_model_loader: - type q8_0: 2 tensors +llama_model_loader: - type mxfp4: 72 tensors +... +load_tensors: offloaded 25/25 layers to GPU +load_tensors: CPU model buffer size = 1182.09 MiB +load_tensors: HTP1 model buffer size = 6.64 MiB +load_tensors: HTP1-REPACK model buffer size = 2505.94 MiB +load_tensors: HTP3 model buffer size = 5.55 MiB +load_tensors: HTP3-REPACK model buffer size = 2088.28 MiB +load_tensors: HTP0 model buffer size = 7.75 MiB +load_tensors: HTP0-REPACK model buffer size = 2923.59 MiB +load_tensors: HTP2 model buffer size = 6.64 MiB +load_tensors: HTP2-REPACK model buffer size = 2505.94 MiB +... +llama_context: n_ctx_per_seq (8192) < n_ctx_train (131072) -- the full capacity of the model will not be utilized +llama_context: CPU output buffer size = 0.77 MiB +llama_kv_cache_iswa: creating non-SWA KV cache, size = 8192 cells +llama_kv_cache: HTP1 KV buffer size = 25.50 MiB +llama_kv_cache: HTP3 KV buffer size = 25.50 MiB +llama_kv_cache: HTP0 KV buffer size = 25.50 MiB +llama_kv_cache: HTP2 KV buffer size = 25.50 MiB +llama_kv_cache: size = 102.00 MiB ( 8192 cells, 12 layers, 1/1 seqs), K (q8_0): 51.00 MiB, V (q8_0): 51.00 MiB +llama_kv_cache_iswa: creating SWA KV cache, size = 256 cells +llama_kv_cache: HTP1 KV buffer size = 0.80 MiB +llama_kv_cache: HTP3 KV buffer size = 0.53 MiB +llama_kv_cache: HTP0 KV buffer size = 1.06 MiB +llama_kv_cache: HTP2 KV buffer size = 0.80 MiB +llama_kv_cache: size = 3.19 MiB ( 256 cells, 12 layers, 1/1 seqs), K (q8_0): 1.59 MiB, V (q8_0): 1.59 MiB +llama_context: HTP0 compute buffer size = 16.06 MiB +llama_context: HTP1 compute buffer size = 16.06 MiB +llama_context: HTP2 compute buffer size = 16.06 MiB +llama_context: HTP3 compute buffer size = 16.06 MiB +llama_context: CPU compute buffer size = 98.19 MiB +... 
+llama_perf_context_print: prompt eval time = 3843.67 ms / 197 tokens ( 19.51 ms per token, 51.25 tokens per second) +llama_perf_context_print: eval time = 1686.13 ms / 31 runs ( 54.39 ms per token, 18.39 tokens per second) +llama_perf_context_print: total time = 6266.30 ms / 228 tokens +llama_perf_context_print: graphs reused = 30 +llama_memory_breakdown_print: | memory breakdown [MiB] | total free self model context compute unaccounted | +llama_memory_breakdown_print: | - HTP0 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - HTP1 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - HTP2 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - HTP3 (Hexagon) | 2048 = 2048 + ( 0 = 0 + 0 + 0) + 0 | +llama_memory_breakdown_print: | - Host | 1476 = 1208 + 105 + 162 | +llama_memory_breakdown_print: | - HTP1-REPACK | 2505 = 2505 + 0 + 0 | +llama_memory_breakdown_print: | - HTP3-REPACK | 2088 = 2088 + 0 + 0 | +llama_memory_breakdown_print: | - HTP0-REPACK | 2923 = 2923 + 0 + 0 | +llama_memory_breakdown_print: | - HTP2-REPACK | 2505 = 2505 + 0 + 0 | +``` diff --git a/docs/ops.md b/docs/ops.md index 5df72d25015d2..dfd1cfab6a8b2 100644 --- a/docs/ops.md +++ b/docs/ops.md @@ -22,7 +22,7 @@ Legend: | ARANGE | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | | ARGMAX | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | | ARGSORT | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | -| CEIL | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | +| CEIL | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | | CLAMP | ❌ | ✅ | ✅ | ✅ | 🟡 | 🟡 | ✅ | 🟡 | ❌ | | CONCAT | ❌ | ✅ | ✅ | 🟡 | ✅ | 🟡 | 🟡 | ✅ | ❌ | | CONT | ❌ | 🟡 | ✅ | ✅ | ✅ | 🟡 | 🟡 | 🟡 | ❌ | @@ -42,7 +42,7 @@ Legend: | ELU | ❌ | ✅ | ✅ | 🟡 | 🟡 | ❌ | 🟡 | ❌ | ❌ | | EXP | ❌ | ✅ | ✅ | 🟡 | 🟡 | ❌ | 🟡 | ❌ | ❌ | | FLASH_ATTN_EXT | ❌ | 🟡 | ✅ | 🟡 | 🟡 | ❌ | ❌ | 🟡 | ❌ | -| FLOOR | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | +| FLOOR | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | | GATED_LINEAR_ATTN | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | | GEGLU | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ❌ | | GEGLU_ERF | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ❌ | @@ -72,7 +72,7 @@ Legend: | OPT_STEP_SGD | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | | OUT_PROD | 🟡 | ❌ | 🟡 | 🟡 | ❌ | ❌ | 🟡 | ❌ | ❌ | | PAD | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | 🟡 | ✅ | ❌ | -| PAD_REFLECT_1D | ❌ | ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | +| PAD_REFLECT_1D | ❌ | ✅ | ✅ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ | | POOL_2D | ❌ | 🟡 | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | | REGLU | ❌ | ✅ | ✅ | ✅ | 🟡 | ✅ | ✅ | 🟡 | ❌ | | RELU | ❌ | ✅ | ✅ | 🟡 | 🟡 | 🟡 | 🟡 | 🟡 | ❌ | @@ -84,7 +84,7 @@ Legend: | ROLL | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | | ROPE | ❌ | 🟡 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | | ROPE_BACK | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | -| ROUND | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | +| ROUND | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | | RWKV_WKV6 | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | | RWKV_WKV7 | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ | ❌ | | SCALE | ❌ | 🟡 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | @@ -100,8 +100,8 @@ Legend: | SOFT_MAX_BACK | ❌ | ❌ | 🟡 | 🟡 | ❌ | ❌ | 🟡 | ✅ | ❌ | | SQR | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | 🟡 | ❌ | | SQRT | ❌ | ✅ | ✅ | ✅ | 🟡 | ❌ | ✅ | ❌ | ❌ | -| SSM_CONV | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | -| SSM_SCAN | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | +| SSM_CONV | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | +| SSM_SCAN | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ❌ | | STEP | ❌ | ✅ | ✅ | 🟡 | 🟡 | ❌ | 🟡 | ❌ | ❌ | | SUB | ❌ | ✅ | ✅ | ✅ | 🟡 | 🟡 | ✅ | ✅ | ❌ | | SUM | ❌ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | @@ -111,6 +111,6 @@ Legend: | TANH | ❌ | ✅ | ✅ | 🟡 | 🟡 | ✅ | 🟡 | 🟡 | ❌ | | TIMESTEP_EMBEDDING | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | 
| TOPK_MOE | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | -| TRUNC | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | +| TRUNC | ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | | UPSCALE | ❌ | 🟡 | ✅ | ✅ | 🟡 | ✅ | 🟡 | ✅ | ❌ | | XIELU | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | diff --git a/docs/ops/SYCL.csv b/docs/ops/SYCL.csv index d7efa43cdf3da..fe6876357f359 100644 --- a/docs/ops/SYCL.csv +++ b/docs/ops/SYCL.csv @@ -31,6 +31,14 @@ "SYCL0","GELU_ERF","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" "SYCL0","XIELU","type=f16,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" "SYCL0","XIELU","type=f16,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" +"SYCL0","FLOOR","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","FLOOR","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f16,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" "SYCL0","ABS","type=f16,ne_a=[128,2,2,2],v=1","support","0","no","SYCL" "SYCL0","ABS","type=f16,ne_a=[5,7,11,13],v=1","support","0","no","SYCL" "SYCL0","SGN","type=f16,ne_a=[128,2,2,2],v=1","support","0","no","SYCL" @@ -95,6 +103,14 @@ "SYCL0","GELU_ERF","type=f32,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" "SYCL0","XIELU","type=f32,ne_a=[128,2,2,2],v=0","support","0","no","SYCL" "SYCL0","XIELU","type=f32,ne_a=[5,7,11,13],v=0","support","0","no","SYCL" +"SYCL0","FLOOR","type=f32,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","FLOOR","type=f32,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f32,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","CEIL","type=f32,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f32,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","ROUND","type=f32,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f32,ne_a=[128,2,2,2],v=0","support","1","yes","SYCL" +"SYCL0","TRUNC","type=f32,ne_a=[5,7,11,13],v=0","support","1","yes","SYCL" "SYCL0","ABS","type=f32,ne_a=[128,2,2,2],v=1","support","0","no","SYCL" "SYCL0","ABS","type=f32,ne_a=[5,7,11,13],v=1","support","0","no","SYCL" "SYCL0","SGN","type=f32,ne_a=[128,2,2,2],v=1","support","0","no","SYCL" @@ -9363,8 +9379,8 @@ "SYCL0","ACC","type=f32,ne_a=[256,17,1,1],ne_b=[256,16,1,1]","support","1","yes","SYCL" "SYCL0","PAD","type=f32,ne_a=[512,512,1,1],pad_0=1,pad_1=1","support","1","yes","SYCL" "SYCL0","PAD","type=f32,ne_a=[512,512,3,1],lp0=1,rp0=1,lp1=1,rp1=1,lp2=1,rp2=1,lp3=1,rp3=1,v=0","support","1","yes","SYCL" -"SYCL0","PAD_REFLECT_1D","type=f32,ne_a=[512,34,2,1],pad_0=10,pad_1=9","support","0","no","SYCL" -"SYCL0","PAD_REFLECT_1D","type=f32,ne_a=[3000,384,4,1],pad_0=10,pad_1=9","support","0","no","SYCL" +"SYCL0","PAD_REFLECT_1D","type=f32,ne_a=[3000,384,4,1],pad_0=10,pad_1=9","support","0","yes","SYCL" +"SYCL0","PAD_REFLECT_1D","type=f32,ne_a=[512,34,2,1],pad_0=10,pad_1=9","support","0","yes","SYCL" "SYCL0","ROLL","shift0=3,shift1=-2,shift3=1,shift4=-1","support","0","no","SYCL" "SYCL0","ARANGE","type=f32,start=0.000000,stop=10.000000,step=1.000000","support","0","no","SYCL" "SYCL0","TIMESTEP_EMBEDDING","type=f32,ne_a=[2,1,1,1],dim=320,max_period=10000","support","1","yes","SYCL" diff --git a/docs/ops/Vulkan.csv 
b/docs/ops/Vulkan.csv index ea252577280d5..298c2a6ccd5fc 100644 --- a/docs/ops/Vulkan.csv +++ b/docs/ops/Vulkan.csv @@ -3263,27 +3263,27 @@ "Vulkan0","RMS_NORM_MUL_ADD","type=f32,ne=[64,5,4,3],eps=1.000000,broadcast=0","support","1","yes","Vulkan" "Vulkan0","RMS_NORM_MUL_ADD","type=f32,ne=[64,5,4,3],eps=1.000000,broadcast=1","support","1","yes","Vulkan" "Vulkan0","L2_NORM","type=f32,ne=[64,5,4,3]","support","1","yes","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,1,1],ne_b=[3,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1024,1,1],ne_b=[3,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,4,1],ne_b=[3,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,1,1],ne_b=[3,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1536,1,1],ne_b=[3,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,4,1],ne_b=[3,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,1,1],ne_b=[3,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,2048,1,1],ne_b=[3,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,4,1],ne_b=[3,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,1,1],ne_b=[4,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1024,1,1],ne_b=[4,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,4,1],ne_b=[4,1024,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,1,1],ne_b=[4,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1536,1,1],ne_b=[4,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,4,1],ne_b=[4,1536,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,1,1],ne_b=[4,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[8,2048,1,1],ne_b=[4,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,4,1],ne_b=[4,2048,1,1]","support","0","no","Vulkan" -"Vulkan0","SSM_SCAN","type=f32,d_state=16,head_dim=1,n_head=1024,n_group=1,n_seq_tokens=32,n_seqs=4","support","0","no","Vulkan" -"Vulkan0","SSM_SCAN","type=f32,d_state=128,head_dim=64,n_head=16,n_group=2,n_seq_tokens=32,n_seqs=4","support","0","no","Vulkan" -"Vulkan0","SSM_SCAN","type=f32,d_state=256,head_dim=64,n_head=8,n_group=2,n_seq_tokens=32,n_seqs=4","support","0","no","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,1,1],ne_b=[3,1024,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1024,1,1],ne_b=[3,1024,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,4,1],ne_b=[3,1024,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,1,1],ne_b=[3,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1536,1,1],ne_b=[3,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,4,1],ne_b=[3,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,1,1],ne_b=[3,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[8,2048,1,1],ne_b=[3,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,4,1],ne_b=[3,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,1,1],ne_b=[4,1024,1,1]","support","1","yes","Vulkan" 
+"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1024,1,1],ne_b=[4,1024,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1024,4,1],ne_b=[4,1024,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,1,1],ne_b=[4,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[8,1536,1,1],ne_b=[4,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,1536,4,1],ne_b=[4,1536,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,1,1],ne_b=[4,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[8,2048,1,1],ne_b=[4,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_CONV","type=f32,ne_a=[4,2048,4,1],ne_b=[4,2048,1,1]","support","1","yes","Vulkan" +"Vulkan0","SSM_SCAN","type=f32,d_state=16,head_dim=1,n_head=1024,n_group=1,n_seq_tokens=32,n_seqs=4","support","1","yes","Vulkan" +"Vulkan0","SSM_SCAN","type=f32,d_state=128,head_dim=64,n_head=16,n_group=2,n_seq_tokens=32,n_seqs=4","support","1","yes","Vulkan" +"Vulkan0","SSM_SCAN","type=f32,d_state=256,head_dim=64,n_head=8,n_group=2,n_seq_tokens=32,n_seqs=4","support","1","yes","Vulkan" "Vulkan0","RWKV_WKV6","type=f32,head_count=32,head_size=64,n_seq_tokens=1,n_seqs=1","support","1","yes","Vulkan" "Vulkan0","RWKV_WKV6","type=f32,head_count=32,head_size=64,n_seq_tokens=32,n_seqs=1","support","1","yes","Vulkan" "Vulkan0","RWKV_WKV6","type=f32,head_count=32,head_size=64,n_seq_tokens=32,n_seqs=4","support","1","yes","Vulkan" diff --git a/examples/model-conversion/scripts/causal/run-org-model.py b/examples/model-conversion/scripts/causal/run-org-model.py index 9444c713d03ab..7fb55e9af1f52 100755 --- a/examples/model-conversion/scripts/causal/run-org-model.py +++ b/examples/model-conversion/scripts/causal/run-org-model.py @@ -138,7 +138,7 @@ def fn(_m, input, output): "Model path must be specified either via --model-path argument or MODEL_PATH environment variable" ) -config = AutoConfig.from_pretrained(model_path) +config = AutoConfig.from_pretrained(model_path, trust_remote_code=True) print("Model type: ", config.model_type) print("Vocab size: ", config.vocab_size) @@ -148,8 +148,8 @@ def fn(_m, input, output): print("EOS token id: ", config.eos_token_id) print("Loading model and tokenizer using AutoTokenizer:", model_path) -tokenizer = AutoTokenizer.from_pretrained(model_path) -config = AutoConfig.from_pretrained(model_path) +tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) +config = AutoConfig.from_pretrained(model_path, trust_remote_code=True) if unreleased_model_name: model_name_lower = unreleased_model_name.lower() @@ -171,7 +171,7 @@ def fn(_m, input, output): exit(1) else: model = AutoModelForCausalLM.from_pretrained( - model_path, device_map="auto", offload_folder="offload" + model_path, device_map="auto", offload_folder="offload", trust_remote_code=True ) for name, module in model.named_modules(): diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 73032be68e153..181f179ed171c 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -251,6 +251,8 @@ option(GGML_OPENCL_USE_ADRENO_KERNELS "ggml: use optimized kernels for Adr set (GGML_OPENCL_TARGET_VERSION "300" CACHE STRING "gmml: OpenCL API version to target") +option(GGML_HEXAGON "ggml: enable Hexagon backend" OFF) + # toolchain for vulkan-shaders-gen set (GGML_VULKAN_SHADERS_GEN_TOOLCHAIN "" CACHE FILEPATH "ggml: toolchain file for vulkan-shaders-gen") diff --git a/ggml/include/ggml-hexagon.h 
b/ggml/include/ggml-hexagon.h new file mode 100644 index 0000000000000..6e07900410393 --- /dev/null +++ b/ggml/include/ggml-hexagon.h @@ -0,0 +1,19 @@ +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// backend API +GGML_BACKEND_API ggml_backend_t ggml_backend_hexagon_init(void); + +GGML_BACKEND_API bool ggml_backend_is_hexagon(ggml_backend_t backend); + +GGML_BACKEND_API ggml_backend_reg_t ggml_backend_hexagon_reg(void); + +#ifdef __cplusplus +} +#endif diff --git a/ggml/include/ggml-rpc.h b/ggml/include/ggml-rpc.h index 72eff0027351a..e6dca3f62b09c 100644 --- a/ggml/include/ggml-rpc.h +++ b/ggml/include/ggml-rpc.h @@ -21,8 +21,7 @@ GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const c GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total); GGML_BACKEND_API void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir, - size_t n_threads, size_t n_devices, - ggml_backend_dev_t * devices, size_t * free_mem, size_t * total_mem); + size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void); GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_add_server(const char * endpoint); diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 892c23318a18e..ba281b8e6d17a 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -307,6 +307,10 @@ function(ggml_add_cpu_backend_variant tag_name) foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() + elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") + foreach (feat ${ARGN}) + set(GGML_INTERNAL_${feat} ON) + endforeach() endif() ggml_add_cpu_backend_variant_impl(${tag_name}) @@ -371,6 +375,14 @@ if (GGML_CPU_ALL_VARIANTS) else() message(FATAL_ERROR "Unsupported PowerPC target OS: ${CMAKE_SYSTEM_NAME}") endif() + elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + ggml_add_cpu_backend_variant(s390x_z15 Z15 VXE) + # ggml_add_cpu_backend_variant(s390x_z16 Z16 VXE) + # ggml_add_cpu_backend_variant(s390x_z17 Z17 VXE) + else() + message(FATAL_ERROR "Unsupported s390x target OS: ${CMAKE_SYSTEM_NAME}") + endif() else() message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() @@ -390,6 +402,7 @@ ggml_add_backend(Vulkan) ggml_add_backend(WebGPU) ggml_add_backend(zDNN) ggml_add_backend(OpenCL) +ggml_add_backend(Hexagon) foreach (target ggml-base ggml) target_include_directories(${target} PUBLIC $ $) diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c index 929bc4488156f..c830c09655fec 100644 --- a/ggml/src/ggml-alloc.c +++ b/ggml/src/ggml-alloc.c @@ -598,6 +598,26 @@ static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated; } +// free the extra space at the end if the new tensor is smaller +static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) { + struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); + struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); + + size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent); + size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); + + GGML_ASSERT(parent_size >= node_size); + + if (parent_size > node_size) 
{ + struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id]; + struct buffer_address p_addr = p_hn->addr; + p_addr.offset += node_size; + size_t extra_size = parent_size - node_size; + AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name); + ggml_dyn_tallocr_free_tensor(p_alloc, p_addr, extra_size, parent); + } +} + static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { GGML_ASSERT(buffer_id >= 0); struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); @@ -643,6 +663,7 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor hn->addr = p_hn->addr; p_hn->allocated = false; // avoid freeing the parent view_src_hn->allocated = false; + ggml_gallocr_free_extra_space(galloc, node, view_src); return; } } else { @@ -650,6 +671,7 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor hn->buffer_id = p_hn->buffer_id; hn->addr = p_hn->addr; p_hn->allocated = false; // avoid freeing the parent + ggml_gallocr_free_extra_space(galloc, node, parent); return; } } diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 136afec748d96..e96b5c403dd3f 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -57,6 +57,10 @@ #include "ggml-opencl.h" #endif +#ifdef GGML_USE_HEXAGON +#include "ggml-hexagon.h" +#endif + #ifdef GGML_USE_BLAS #include "ggml-blas.h" #endif @@ -199,6 +203,9 @@ struct ggml_backend_registry { #ifdef GGML_USE_OPENCL register_backend(ggml_backend_opencl_reg()); #endif +#ifdef GGML_USE_HEXAGON + register_backend(ggml_backend_hexagon_reg()); +#endif #ifdef GGML_USE_CANN register_backend(ggml_backend_cann_reg()); #endif @@ -598,6 +605,7 @@ void ggml_backend_load_all_from_path(const char * dir_path) { ggml_backend_load_best("sycl", silent, dir_path); ggml_backend_load_best("vulkan", silent, dir_path); ggml_backend_load_best("opencl", silent, dir_path); + ggml_backend_load_best("hexagon", silent, dir_path); ggml_backend_load_best("musa", silent, dir_path); ggml_backend_load_best("cpu", silent, dir_path); // check the environment variable GGML_BACKEND_PATH to load an out-of-tree backend diff --git a/ggml/src/ggml-cann/acl_tensor.cpp b/ggml/src/ggml-cann/acl_tensor.cpp old mode 100755 new mode 100644 index 8ffac31dd661a..8958ebcd78704 --- a/ggml/src/ggml-cann/acl_tensor.cpp +++ b/ggml/src/ggml-cann/acl_tensor.cpp @@ -51,28 +51,31 @@ aclDataType ggml_cann_type_mapping(ggml_type type) { return ACL_DT_UNDEFINED; } -aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne, - size_t* nb, int64_t dims, aclFormat format, - size_t offset) { +aclTensor * ggml_cann_create_tensor(const ggml_tensor * tensor, + int64_t * ne, + size_t * nb, + int64_t dims, + aclFormat format, + size_t offset) { // If tensor is bcasted, Up to GGML_MAX_DIMS additional dimensions will be // added. int64_t acl_ne[GGML_MAX_DIMS * 2], acl_stride[GGML_MAX_DIMS * 2]; if (ne == nullptr) { for (int i = 0; i < GGML_MAX_DIMS; i++) { - acl_ne[i] = tensor->ne[i]; + acl_ne[i] = tensor->ne[i]; // The step size of acl is in elements. acl_stride[i] = tensor->nb[i] / ggml_element_size(tensor); } } else { // With bcast for (int i = 0; i < dims; i++) { - acl_ne[i] = ne[i]; + acl_ne[i] = ne[i]; acl_stride[i] = nb[i] / ggml_element_size(tensor); } } - int64_t final_dims = (dims == 0 ? GGML_MAX_DIMS : dims); + int64_t final_dims = (dims == 0 ? 
GGML_MAX_DIMS : dims); int64_t acl_storage_len = 1; for (int i = 0; i < final_dims; i++) { acl_storage_len += (acl_ne[i] - 1) * acl_stride[i]; @@ -84,15 +87,13 @@ aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne, std::reverse(acl_ne, acl_ne + final_dims); std::reverse(acl_stride, acl_stride + final_dims); - aclTensor* acl_tensor = aclCreateTensor( - acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride, - elem_offset, format, &acl_storage_len, 1, - tensor->data); + aclTensor * acl_tensor = aclCreateTensor(acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride, + elem_offset, format, &acl_storage_len, 1, tensor->data); return acl_tensor; } -bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) { +bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1) { for (int i = 0; i < GGML_MAX_DIMS; i++) { if (t1->ne[i] != t0->ne[i] && t1->ne[i] != 1) { return true; @@ -101,15 +102,16 @@ bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) { return false; } -int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, - const ggml_tensor* src1, - int64_t* bcast_src0_ne, - int64_t* bcast_src1_ne, size_t* bcast_src0_nb, - size_t* bcast_src1_nb) { +int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0, + const ggml_tensor * src1, + int64_t * bcast_src0_ne, + int64_t * bcast_src1_ne, + size_t * bcast_src0_nb, + size_t * bcast_src1_nb) { GGML_ASSERT(ggml_can_repeat(src1, src0)); int bcast_dim_cnt = 0; for (int i = 0; i < GGML_MAX_DIMS; i++) { - int64_t nr = src0->ne[i] / src1->ne[i]; + int64_t nr = src0->ne[i] / src1->ne[i]; bcast_src0_ne[bcast_dim_cnt] = src0->ne[i] / nr; bcast_src1_ne[bcast_dim_cnt] = src1->ne[i]; bcast_src0_nb[bcast_dim_cnt] = src0->nb[i]; @@ -119,21 +121,26 @@ int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, // Need to add an extra dim. bcast_src0_ne[bcast_dim_cnt] = nr; bcast_src1_ne[bcast_dim_cnt] = 1; - bcast_src0_nb[bcast_dim_cnt] = bcast_src0_nb[bcast_dim_cnt - 1] * - bcast_src0_ne[bcast_dim_cnt - 1]; - bcast_src1_nb[bcast_dim_cnt] = bcast_src1_nb[bcast_dim_cnt - 1] * - bcast_src1_ne[bcast_dim_cnt - 1]; + bcast_src0_nb[bcast_dim_cnt] = bcast_src0_nb[bcast_dim_cnt - 1] * bcast_src0_ne[bcast_dim_cnt - 1]; + bcast_src1_nb[bcast_dim_cnt] = bcast_src1_nb[bcast_dim_cnt - 1] * bcast_src1_ne[bcast_dim_cnt - 1]; bcast_dim_cnt++; } } return bcast_dim_cnt; } -int64_t ggml_cann_get_mulmat_bcast_shape( - const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne, - const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb, - int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne, - size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb) { +int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne, + const int64_t * weight_ne, + const int64_t * dst_ne, + const size_t * input_nb, + const size_t * weight_nb, + const size_t * dst_nb, + int64_t * bcast_input_ne, + int64_t * bcast_weight_ne, + int64_t * bcast_dst_ne, + size_t * bcast_input_nb, + size_t * bcast_weight_nb, + size_t * bcast_dst_nb) { // input and dst shoule in same shape, except first two dims. GGML_ASSERT(input_ne[2] == dst_ne[2]); GGML_ASSERT(input_ne[3] == dst_ne[3]); @@ -148,34 +155,30 @@ int64_t ggml_cann_get_mulmat_bcast_shape( // Do not use bcast in the first two dimensions because we only support // the bcast batch dimension. Just copy them. 
if (i < 2 || nr == 1) { - bcast_input_ne[bcast_dim_cnt] = input_ne[i]; + bcast_input_ne[bcast_dim_cnt] = input_ne[i]; bcast_weight_ne[bcast_dim_cnt] = weight_ne[i]; - bcast_dst_ne[bcast_dim_cnt] = dst_ne[i]; + bcast_dst_ne[bcast_dim_cnt] = dst_ne[i]; - bcast_input_nb[bcast_dim_cnt] = input_nb[i]; + bcast_input_nb[bcast_dim_cnt] = input_nb[i]; bcast_weight_nb[bcast_dim_cnt] = weight_nb[i]; - bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; + bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; bcast_dim_cnt++; } else { // Need to add an extra dim. - bcast_input_ne[bcast_dim_cnt] = nr; - bcast_dst_ne[bcast_dim_cnt] = nr; + bcast_input_ne[bcast_dim_cnt] = nr; + bcast_dst_ne[bcast_dim_cnt] = nr; bcast_weight_ne[bcast_dim_cnt] = 1; - bcast_input_nb[bcast_dim_cnt] = input_nb[i]; - bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; + bcast_input_nb[bcast_dim_cnt] = input_nb[i]; + bcast_dst_nb[bcast_dim_cnt] = dst_nb[i]; bcast_weight_nb[bcast_dim_cnt] = weight_nb[i]; bcast_dim_cnt++; - bcast_input_ne[bcast_dim_cnt] = input_ne[i] / nr; - bcast_dst_ne[bcast_dim_cnt] = dst_ne[i] / nr; + bcast_input_ne[bcast_dim_cnt] = input_ne[i] / nr; + bcast_dst_ne[bcast_dim_cnt] = dst_ne[i] / nr; bcast_weight_ne[bcast_dim_cnt] = weight_ne[i]; - bcast_input_nb[bcast_dim_cnt] = bcast_input_nb[bcast_dim_cnt - 1] * - bcast_input_ne[bcast_dim_cnt - 1]; - bcast_dst_nb[bcast_dim_cnt] = bcast_dst_nb[bcast_dim_cnt - 1] * - bcast_dst_ne[bcast_dim_cnt - 1]; - bcast_weight_nb[bcast_dim_cnt] = - bcast_weight_nb[bcast_dim_cnt - 1] * - bcast_weight_ne[bcast_dim_cnt - 1]; + bcast_input_nb[bcast_dim_cnt] = bcast_input_nb[bcast_dim_cnt - 1] * bcast_input_ne[bcast_dim_cnt - 1]; + bcast_dst_nb[bcast_dim_cnt] = bcast_dst_nb[bcast_dim_cnt - 1] * bcast_dst_ne[bcast_dim_cnt - 1]; + bcast_weight_nb[bcast_dim_cnt] = bcast_weight_nb[bcast_dim_cnt - 1] * bcast_weight_ne[bcast_dim_cnt - 1]; bcast_dim_cnt++; } } diff --git a/ggml/src/ggml-cann/acl_tensor.h b/ggml/src/ggml-cann/acl_tensor.h old mode 100755 new mode 100644 index 93f09937efb31..cb17ebcc1bbe2 --- a/ggml/src/ggml-cann/acl_tensor.h +++ b/ggml/src/ggml-cann/acl_tensor.h @@ -62,10 +62,12 @@ aclDataType ggml_cann_type_mapping(ggml_type type); * @param offset Offset in bytes for the ACL tensor data. Defaults to 0. * @return Pointer to the created ACL tensor. */ -aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr, - size_t* nb = nullptr, int64_t dims = 0, - aclFormat format = ACL_FORMAT_ND, - size_t offset = 0); +aclTensor * ggml_cann_create_tensor(const ggml_tensor * tensor, + int64_t * ne = nullptr, + size_t * nb = nullptr, + int64_t dims = 0, + aclFormat format = ACL_FORMAT_ND, + size_t offset = 0); /** * @brief Template for creating an ACL tensor from provided parameters. typename TYPE @@ -87,12 +89,15 @@ aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = null * @param offset Offset in bytes for the ACL tensor data. Defaults to 0. * @return Pointer to the created ACL tensor. 
*/ -template -aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype, - TYPE type_size, int64_t* ne, TYPE* nb, - int64_t dims, - aclFormat format = ACL_FORMAT_ND, - size_t offset = 0) { +template +aclTensor * ggml_cann_create_tensor(void * data_ptr, + aclDataType dtype, + TYPE type_size, + int64_t * ne, + TYPE * nb, + int64_t dims, + aclFormat format = ACL_FORMAT_ND, + size_t offset = 0) { int64_t tmp_ne[GGML_MAX_DIMS * 2]; int64_t tmp_stride[GGML_MAX_DIMS * 2]; @@ -109,9 +114,8 @@ aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype, std::reverse(tmp_ne, tmp_ne + dims); std::reverse(tmp_stride, tmp_stride + dims); - aclTensor* acl_tensor = - aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, - format, &acl_storage_len, 1, data_ptr); + aclTensor * acl_tensor = + aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, format, &acl_storage_len, 1, data_ptr); return acl_tensor; } @@ -132,7 +136,7 @@ aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype, * to 1. If such a dimension is found, broadcasting is required to align t1 * with t0 for element-wise operations. */ -bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1); +bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1); /** * @brief Computes broadcast shapes and strides for two ggml_tensors. @@ -187,19 +191,21 @@ bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1); * dim1 in a inserted dim, should add nb for dim1, * and all other nb moves to next in order. */ -int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1, - int64_t* bcast_ne_src0, int64_t* bcast_ne_src1, - size_t* bcast_nb_src0, size_t* bcast_nb_src1); +int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0, + const ggml_tensor * src1, + int64_t * bcast_ne_src0, + int64_t * bcast_ne_src1, + size_t * bcast_nb_src0, + size_t * bcast_nb_src1); // Bcast macro to avoid duplicate code. -#define BCAST_SHAPE(src0, src1) \ - int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \ - int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \ - size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \ - size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \ - int64_t bcast_dims = ggml_cann_get_bcast_shape( \ - src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, bcast_##src0##_nb, \ - bcast_##src1##_nb); +#define BCAST_SHAPE(src0, src1) \ + int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \ + int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \ + size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \ + size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \ + int64_t bcast_dims = ggml_cann_get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, \ + bcast_##src0##_nb, bcast_##src1##_nb); #define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims @@ -233,26 +239,31 @@ int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* sr * before cast dim. 
* @sa ggml_cann_get_bcast_shape */ -int64_t ggml_cann_get_mulmat_bcast_shape( - const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne, - const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb, - int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne, - size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb); +int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne, + const int64_t * weight_ne, + const int64_t * dst_ne, + const size_t * input_nb, + const size_t * weight_nb, + const size_t * dst_nb, + int64_t * bcast_input_ne, + int64_t * bcast_weight_ne, + int64_t * bcast_dst_ne, + size_t * bcast_input_nb, + size_t * bcast_weight_nb, + size_t * bcast_dst_nb); // Bcast macro to avoid duplicate code. -#define BCAST_MUL_MAT_SHAPE(input, weight, dst) \ - int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \ - int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \ - int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \ - size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \ - size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \ - size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \ - int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \ - input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, \ - bcast_##input##_ne, bcast_##weight##_ne, bcast_##dst##_ne, \ - bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb); +#define BCAST_MUL_MAT_SHAPE(input, weight, dst) \ + int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \ + int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \ + int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \ + size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \ + size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \ + size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \ + int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \ + input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, bcast_##input##_ne, bcast_##weight##_ne, \ + bcast_##dst##_ne, bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb); -#define BCAST_MUL_MAT_PARAM(tensor) \ - bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims +#define BCAST_MUL_MAT_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims #endif // CANN_ACL_TENSOR_H diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp old mode 100755 new mode 100644 index 2857e080b4c16..f030ea0136a95 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -86,9 +86,12 @@ #include "../ggml-common.h" - -void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, aclTensor ** acl_src0, - aclTensor ** acl_src1, aclTensor ** acl_dst) { +void bcast_shape(ggml_tensor * src0, + ggml_tensor * src1, + ggml_tensor * dst, + aclTensor ** acl_src0, + aclTensor ** acl_src1, + aclTensor ** acl_dst) { GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_can_repeat(src1, src0)); // Need bcast if (!ggml_are_same_shape(src0, src1) && ggml_cann_need_bcast(src0, src1)) { @@ -103,40 +106,40 @@ void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, aclT } } -void ggml_cann_op_unary( - std::function unary_op, - ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_op_unary(std::function unary_op, + ggml_backend_cann_context & ctx, + ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); 
unary_op(ctx, acl_src, acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst); } -void ggml_cann_op_unary_gated( - std::function unary_op, - ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; - ggml_tensor* src1 = dst->src[1]; +void ggml_cann_op_unary_gated(std::function unary_op, + ggml_backend_cann_context & ctx, + ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; GGML_ASSERT(ggml_is_contiguous_1(src0)); GGML_ASSERT(ggml_is_contiguous_1(dst)); const int32_t swapped = ggml_get_op_params_i32(dst, 1); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); - aclTensor *acl_src0 = nullptr, *acl_src1 = nullptr; - if(src1) { + aclTensor * acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src0 = nullptr, *acl_src1 = nullptr; + if (src1) { GGML_ASSERT(ggml_is_contiguous_1(src1)); GGML_ASSERT(src0->type == src1->type); acl_src0 = ggml_cann_create_tensor(src0); acl_src1 = ggml_cann_create_tensor(src1); } else { - int64_t ne[] = {src0->ne[0] / 2, src0->ne[1], src0->ne[2], src0->ne[3]}; - size_t nb[] = {src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]}; - acl_src0 = ggml_cann_create_tensor(src0, ne, nb, GGML_MAX_DIMS, ACL_FORMAT_ND, 0); + int64_t ne[] = { src0->ne[0] / 2, src0->ne[1], src0->ne[2], src0->ne[3] }; + size_t nb[] = { src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3] }; + acl_src0 = ggml_cann_create_tensor(src0, ne, nb, GGML_MAX_DIMS, ACL_FORMAT_ND, 0); acl_src1 = ggml_cann_create_tensor(src0, ne, nb, GGML_MAX_DIMS, ACL_FORMAT_ND, ne[0] * ggml_element_size(src0)); if (swapped) { std::swap(acl_src0, acl_src1); @@ -159,10 +162,12 @@ void ggml_cann_op_unary_gated( * @param repeat_array The array specifying the number of repetitions along each * dimension. */ -static void aclnn_repeat(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst, int64_t* repeat_array) { +static void aclnn_repeat(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + int64_t * repeat_array) { // repeat tensor along each dim with repeat_array - aclIntArray* repeats = aclCreateIntArray(repeat_array, GGML_MAX_DIMS); + aclIntArray * repeats = aclCreateIntArray(repeat_array, GGML_MAX_DIMS); GGML_CANN_CALL_ACLNN_OP(ctx, Repeat, acl_src, repeats, acl_dst); ggml_cann_release_resources(ctx, repeats); @@ -181,61 +186,63 @@ static void aclnn_repeat(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param cast_data_type The target data type to which the source tensor will be * casted. 
*/ -static void aclnn_cast(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst, aclDataType cast_data_type) { +static void aclnn_cast(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + aclDataType cast_data_type) { GGML_CANN_CALL_ACLNN_OP(ctx, Cast, acl_src, cast_data_type, acl_dst); } -void ggml_cann_repeat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_repeat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; GGML_ASSERT(ggml_can_repeat(src, dst)); - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - int64_t repeatsArray[] = {dst->ne[3] / src->ne[3], dst->ne[2] / src->ne[2], - dst->ne[1] / src->ne[1], dst->ne[0] / src->ne[0]}; + int64_t repeatsArray[] = { dst->ne[3] / src->ne[3], dst->ne[2] / src->ne[2], dst->ne[1] / src->ne[1], + dst->ne[0] / src->ne[0] }; aclnn_repeat(ctx, acl_src, acl_dst, repeatsArray); ggml_cann_release_resources(ctx, acl_src, acl_dst); } -void aclnn_add(ggml_backend_cann_context& ctx, aclTensor* acl_src0, - aclTensor* acl_src1, aclTensor* acl_dst) { - float alphaValue = 1.0f; - aclScalar* alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); - if (acl_dst != nullptr) +void aclnn_add(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst) { + float alphaValue = 1.0f; + aclScalar * alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Add, acl_src0, acl_src1, alpha, acl_dst); - else + } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdd, acl_src0, acl_src1, alpha); + } ggml_cann_release_resources(ctx, alpha); } -void aclnn_sub(ggml_backend_cann_context& ctx, aclTensor* acl_src0, - aclTensor* acl_src1, aclTensor* acl_dst) { - float alphaValue = 1.0f; - aclScalar* alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); - if (acl_dst != nullptr) +void aclnn_sub(ggml_backend_cann_context & ctx, aclTensor * acl_src0, aclTensor * acl_src1, aclTensor * acl_dst) { + float alphaValue = 1.0f; + aclScalar * alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Sub, acl_src0, acl_src1, alpha, acl_dst); - else + } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSub, acl_src0, acl_src1, alpha); + } ggml_cann_release_resources(ctx, alpha); } -void aclnn_mul(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_other, aclTensor* acl_dst) { - if (acl_dst != nullptr) +void aclnn_mul(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst) { + if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Mul, acl_src, acl_other, acl_dst); - else + } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMul, acl_src, acl_other); + } } -void aclnn_div(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_other, aclTensor* acl_dst) { - if (acl_dst != nullptr) +void aclnn_div(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_other, aclTensor * acl_dst) { + if (acl_dst != nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, Div, acl_src, acl_other, acl_dst); - else + } else { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceDiv, acl_src, acl_other); + } } /** @@ -260,9 +267,12 @@ void aclnn_div(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param inplace Flag 
indicating whether to perform the operation in-place on * `acl_src`. */ -static void aclnn_muls(ggml_backend_cann_context& ctx, aclTensor* acl_src, - float scale, aclTensor* acl_dst, bool inplace) { - aclScalar* acl_scale = aclCreateScalar(&scale, aclDataType::ACL_FLOAT); +static void aclnn_muls(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + float scale, + aclTensor * acl_dst, + bool inplace) { + aclScalar * acl_scale = aclCreateScalar(&scale, aclDataType::ACL_FLOAT); if (inplace) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMuls, acl_src, acl_scale); } else { @@ -271,19 +281,18 @@ static void aclnn_muls(ggml_backend_cann_context& ctx, aclTensor* acl_src, ggml_cann_release_resources(ctx, acl_scale); } -void ggml_cann_leaky_relu(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_leaky_relu(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); float negative_slope; memcpy(&negative_slope, dst->op_params, sizeof(float)); - aclScalar* acl_negative_slope = - aclCreateScalar(&negative_slope, aclDataType::ACL_FLOAT); + aclScalar * acl_negative_slope = aclCreateScalar(&negative_slope, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, LeakyRelu, acl_src, acl_negative_slope, acl_dst); ggml_cann_release_resources(ctx, acl_negative_slope, acl_src, acl_dst); @@ -299,26 +308,27 @@ void ggml_cann_leaky_relu(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * stored. * @param concat_dim The dimension along which the tensors will be concatenated. */ -static void aclnn_concat(ggml_backend_cann_context& ctx, - aclTensorList* tensorList, aclTensor* acl_dst, - int64_t concat_dim) { +static void aclnn_concat(ggml_backend_cann_context & ctx, + aclTensorList * tensorList, + aclTensor * acl_dst, + int64_t concat_dim) { GGML_CANN_CALL_ACLNN_OP(ctx, Cat, tensorList, concat_dim, acl_dst); } -void ggml_cann_concat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; - ggml_tensor* src1 = dst->src[1]; - aclTensor* acl_src0 = ggml_cann_create_tensor(src0); - aclTensor* acl_src1 = ggml_cann_create_tensor(src1); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); +void ggml_cann_concat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; + aclTensor * acl_src0 = ggml_cann_create_tensor(src0); + aclTensor * acl_src1 = ggml_cann_create_tensor(src1); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); const int32_t dim = ggml_get_op_params_i32(dst, 0); GGML_ASSERT(dim >= 0 && dim < 4); int32_t acl_dim = 3 - dim; - aclTensor* tensors[] = {acl_src0, acl_src1}; - aclTensorList* tensor_list = aclCreateTensorList(tensors, 2); + aclTensor * tensors[] = { acl_src0, acl_src1 }; + aclTensorList * tensor_list = aclCreateTensorList(tensors, 2); aclnn_concat(ctx, tensor_list, acl_dst, acl_dim); ggml_cann_release_resources(ctx, tensor_list, acl_dst); @@ -341,162 +351,157 @@ void ggml_cann_concat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param step The step size between consecutive values. * @param n_elements The number of elements in the destination tensor. 
*/ -static void aclnn_arange(ggml_backend_cann_context& ctx, aclTensor* acl_dst, - float start, float stop, float step, - int64_t n_elements) { - int64_t steps = (int64_t)std::ceil((stop - start) / step); +static void aclnn_arange(ggml_backend_cann_context & ctx, + aclTensor * acl_dst, + float start, + float stop, + float step, + int64_t n_elements) { + int64_t steps = (int64_t) std::ceil((stop - start) / step); GGML_ASSERT(n_elements == steps); - aclScalar* acl_start = aclCreateScalar(&start, aclDataType::ACL_FLOAT); - aclScalar* acl_end = aclCreateScalar(&stop, aclDataType::ACL_FLOAT); - aclScalar* acl_step = aclCreateScalar(&step, aclDataType::ACL_FLOAT); + aclScalar * acl_start = aclCreateScalar(&start, aclDataType::ACL_FLOAT); + aclScalar * acl_end = aclCreateScalar(&stop, aclDataType::ACL_FLOAT); + aclScalar * acl_step = aclCreateScalar(&step, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Arange, acl_start, acl_end, acl_step, acl_dst); ggml_cann_release_resources(ctx, acl_start, acl_end, acl_step); } -void ggml_cann_arange(ggml_backend_cann_context& ctx, ggml_tensor* dst) { +void ggml_cann_arange(ggml_backend_cann_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->type == GGML_TYPE_F32); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); int64_t n_elements = ggml_nelements(dst); - float start; - float stop; - float step; - memcpy(&start, (float*)dst->op_params + 0, sizeof(float)); - memcpy(&stop, (float*)dst->op_params + 1, sizeof(float)); - memcpy(&step, (float*)dst->op_params + 2, sizeof(float)); + float start; + float stop; + float step; + memcpy(&start, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&stop, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&step, (float *) dst->op_params + 2, sizeof(float)); aclnn_arange(ctx, acl_dst, start, stop, step, n_elements); ggml_cann_release_resources(ctx, acl_dst); } -void ggml_cann_clamp(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_clamp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; float min; float max; memcpy(&min, dst->op_params, sizeof(float)); - memcpy(&max, (float*)dst->op_params + 1, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - aclScalar* acl_min = aclCreateScalar(&min, aclDataType::ACL_FLOAT); - aclScalar* acl_max = aclCreateScalar(&max, aclDataType::ACL_FLOAT); + aclScalar * acl_min = aclCreateScalar(&min, aclDataType::ACL_FLOAT); + aclScalar * acl_max = aclCreateScalar(&max, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Clamp, acl_src, acl_min, acl_max, acl_dst); ggml_cann_release_resources(ctx, acl_min, acl_max, acl_src, acl_dst); } -void ggml_cann_scale(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_scale(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; // scale factor float v; memcpy(&v, dst->op_params, sizeof(float)); - aclScalar* scale = aclCreateScalar(&v, aclDataType::ACL_FLOAT); - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclScalar * scale = aclCreateScalar(&v, aclDataType::ACL_FLOAT); + aclTensor * acl_src = 
ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); GGML_CANN_CALL_ACLNN_OP(ctx, Muls, acl_src, scale, acl_dst); ggml_cann_release_resources(ctx, scale, acl_src, acl_dst); } -void ggml_cann_argsort(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; - enum ggml_sort_order order = (enum ggml_sort_order)dst->op_params[0]; - - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); - ggml_cann_pool_alloc temp_buffer_allocator( - ctx.pool(), ggml_nelements(dst) * sizeof(int64_t)); - void* buffer = temp_buffer_allocator.get(); - aclTensor* tmp_tensor = - ggml_cann_create_tensor(buffer, ACL_INT64, ggml_type_size(dst->type), - dst->ne, dst->nb, GGML_MAX_DIMS); - GGML_CANN_CALL_ACLNN_OP(ctx, Argsort, acl_src, -1, (order == GGML_SORT_ORDER_DESC ? true : false), - tmp_tensor); +void ggml_cann_argsort(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; + enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; + + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); + ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), ggml_nelements(dst) * sizeof(int64_t)); + void * buffer = temp_buffer_allocator.get(); + aclTensor * tmp_tensor = + ggml_cann_create_tensor(buffer, ACL_INT64, ggml_type_size(dst->type), dst->ne, dst->nb, GGML_MAX_DIMS); + GGML_CANN_CALL_ACLNN_OP(ctx, Argsort, acl_src, -1, (order == GGML_SORT_ORDER_DESC ? true : false), tmp_tensor); GGML_CANN_CALL_ACLNN_OP(ctx, Cast, tmp_tensor, ggml_cann_type_mapping(dst->type), acl_dst); ggml_cann_release_resources(ctx, acl_src, tmp_tensor, acl_dst); } -void ggml_cann_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); float eps; memcpy(&eps, dst->op_params, sizeof(float)); - std::vector normData = {dst->ne[0]}; - aclIntArray* norm = aclCreateIntArray(normData.data(), normData.size()); - GGML_CANN_CALL_ACLNN_OP(ctx, LayerNorm, acl_src, norm, nullptr, nullptr, - eps, acl_dst, nullptr, nullptr); + std::vector normData = { dst->ne[0] }; + aclIntArray * norm = aclCreateIntArray(normData.data(), normData.size()); + GGML_CANN_CALL_ACLNN_OP(ctx, LayerNorm, acl_src, norm, nullptr, nullptr, eps, acl_dst, nullptr, nullptr); ggml_cann_release_resources(ctx, norm, acl_src, acl_dst); } -void ggml_cann_group_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); int n_groups = dst->op_params[0]; float eps; memcpy(&eps, dst->op_params + 1, sizeof(float)); - int64_t N = src->ne[3]; - int64_t C = src->ne[2]; + int64_t N = src->ne[3]; + int64_t C = src->ne[2]; int64_t HxW = src->ne[1] * src->ne[0]; - size_t type_size = ggml_type_size(src->type); - int64_t ne[] = {n_groups, N}; - size_t nb[] = {type_size, type_size * n_groups}; - size_t 
n_bytes = N * n_groups; + size_t type_size = ggml_type_size(src->type); + int64_t ne[] = { n_groups, N }; + size_t nb[] = { type_size, type_size * n_groups }; + size_t n_bytes = N * n_groups; ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), n_bytes * 2); - void* buffer = temp_buffer_allocator.get(); - aclTensor* acl_mean_out = ggml_cann_create_tensor( - buffer, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); - aclTensor* acl_rstd_out = ggml_cann_create_tensor( - (char*)buffer + n_bytes, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); - - GGML_CANN_CALL_ACLNN_OP(ctx, GroupNorm, acl_src, nullptr, nullptr, N, C, HxW, n_groups, eps, - acl_dst, acl_mean_out, acl_rstd_out); + void * buffer = temp_buffer_allocator.get(); + aclTensor * acl_mean_out = ggml_cann_create_tensor(buffer, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); + aclTensor * acl_rstd_out = + ggml_cann_create_tensor((char *) buffer + n_bytes, ACL_FLOAT, type_size, ne, nb, ACL_FORMAT_ND); + + GGML_CANN_CALL_ACLNN_OP(ctx, GroupNorm, acl_src, nullptr, nullptr, N, C, HxW, n_groups, eps, acl_dst, acl_mean_out, + acl_rstd_out); ggml_cann_release_resources(ctx, acl_src, acl_dst, acl_mean_out, acl_rstd_out); } -void ggml_cann_acc(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; - ggml_tensor* src1 = dst->src[1]; +void ggml_cann_acc(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; - size_t nb1 = ((int32_t*)dst->op_params)[0]; - size_t nb2 = ((int32_t*)dst->op_params)[1]; - size_t nb3 = ((int32_t*)dst->op_params)[2]; - size_t offset = ((int32_t*)dst->op_params)[3]; - bool inplace = (bool)((int32_t*)dst->op_params)[4]; + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - size_t param_nb[] = {ggml_element_size(src0), nb1, nb2, nb3}; + size_t param_nb[] = { ggml_element_size(src0), nb1, nb2, nb3 }; - aclTensor* acl_dst = ggml_cann_create_tensor( - dst, src1->ne, param_nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); - aclTensor* acl_src1 = ggml_cann_create_tensor(src1); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, src1->ne, param_nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); + aclTensor * acl_src1 = ggml_cann_create_tensor(src1); - aclScalar* alpha = nullptr; - float alphaValue = 1.0f; - alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + aclScalar * alpha = nullptr; + float alphaValue = 1.0f; + alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); if (!inplace) { size_t cpy_size = ggml_nbytes(dst); - ggml_cann_async_memcpy(ctx, dst->data, src0->data, cpy_size, - ACL_MEMCPY_DEVICE_TO_DEVICE); - aclTensor* acl_src0 = ggml_cann_create_tensor( - src0, src1->ne, src0->nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); + ggml_cann_async_memcpy(ctx, dst->data, src0->data, cpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE); + aclTensor * acl_src0 = ggml_cann_create_tensor(src0, src1->ne, src0->nb, GGML_MAX_DIMS, ACL_FORMAT_ND, offset); GGML_CANN_CALL_ACLNN_OP(ctx, Add, acl_src0, acl_src1, alpha, acl_dst); ggml_cann_release_resources(ctx, acl_src0); @@ -516,39 +521,34 @@ void ggml_cann_acc(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param dim An array of dimension indices. * @param dim_size The number of dimensions. 
*/ -static void aclnn_reduce_sum(ggml_backend_cann_context& ctx, ggml_tensor* dst, - int64_t* dim, size_t dim_size) { +static void aclnn_reduce_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst, int64_t * dim, size_t dim_size) { GGML_ASSERT(dst->ne[0] == 1); - ggml_tensor* src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); - aclIntArray* reduce_dims = aclCreateIntArray(dim, dim_size); + ggml_tensor * src = dst->src[0]; + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); + aclIntArray * reduce_dims = aclCreateIntArray(dim, dim_size); - GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_src, reduce_dims, true, - ggml_cann_type_mapping(dst->type), acl_dst); + GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_src, reduce_dims, true, ggml_cann_type_mapping(dst->type), acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst, reduce_dims); } -void ggml_cann_sum_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - int64_t reduce_dims[] = {3}; +void ggml_cann_sum_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + int64_t reduce_dims[] = { 3 }; aclnn_reduce_sum(ctx, dst, reduce_dims, 1); } -void ggml_cann_sum(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - int64_t reduce_dims[] = {0, 1, 2, 3}; +void ggml_cann_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + int64_t reduce_dims[] = { 0, 1, 2, 3 }; aclnn_reduce_sum(ctx, dst, reduce_dims, 4); } -void ggml_cann_upsample_nearest2d(ggml_backend_cann_context& ctx, - ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; - aclTensor* acl_src = - ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - aclTensor* acl_dst = - ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); +void ggml_cann_upsample_nearest2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; + aclTensor * acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - std::vector output_size{dst->ne[1], dst->ne[0]}; - auto output_size_array = aclCreateIntArray(output_size.data(), 2); + std::vector output_size{ dst->ne[1], dst->ne[0] }; + auto output_size_array = aclCreateIntArray(output_size.data(), 2); GGML_CANN_CALL_ACLNN_OP(ctx, UpsampleNearest2d, acl_src, output_size_array, acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst, output_size_array); @@ -568,20 +568,22 @@ void ggml_cann_upsample_nearest2d(ggml_backend_cann_context& ctx, * The size of the array should be twice the number of dimensions of the tensor. * @param value The value to be used for padding. The default value is 0.0. 
*/ -static void aclnn_pad(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst, int64_t* paddings, - float value = 0.0f) { - aclIntArray* acl_pad = aclCreateIntArray(paddings, GGML_MAX_DIMS * 2); - aclScalar* acl_value = aclCreateScalar(&value, aclDataType::ACL_FLOAT); +static void aclnn_pad(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + int64_t * paddings, + float value = 0.0f) { + aclIntArray * acl_pad = aclCreateIntArray(paddings, GGML_MAX_DIMS * 2); + aclScalar * acl_value = aclCreateScalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, ConstantPadNd, acl_src, acl_pad, acl_value, acl_dst); ggml_cann_release_resources(ctx, acl_pad, acl_value); } -void ggml_cann_pad(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); +void ggml_cann_pad(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); // padding: value in the array means how much distance will be padding. // the position of elements in the array means which dirction to padding, @@ -596,7 +598,7 @@ void ggml_cann_pad(ggml_backend_cann_context& ctx, ggml_tensor* dst) { const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - int64_t paddings[] = {lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3}; + int64_t paddings[] = { lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3 }; aclnn_pad(ctx, acl_src, acl_dst, paddings); ggml_cann_release_resources(ctx, acl_src, acl_dst); } @@ -613,46 +615,41 @@ void ggml_cann_pad(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param dst The destination tensor where the result will be stored. The source * tensor is referenced by `dst->src[0]`. 
*/ -static void ggml_cann_avg_pool2d(ggml_backend_cann_context& ctx, - ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +static void ggml_cann_avg_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); - aclTensor* acl_src = - ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - aclTensor* acl_dst = - ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - - const int32_t* opts = (const int32_t*)dst->op_params; - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - - std::vector kernel_dims = {k1, k0}; - std::vector stride_dims = {s1, s0}; - std::vector padding_avg_dims = {p1, p0}; // (padH, padW) - - auto* kernel_size = aclCreateIntArray(kernel_dims.data(), 2); - auto* strides = aclCreateIntArray(stride_dims.data(), 2); - auto* paddings_avg = aclCreateIntArray(padding_avg_dims.data(), 2); - - bool ceil_mode = false; - bool count_include_pad = true; - int64_t divisor_override = 0; - int8_t cube_math_type = 0; + aclTensor * acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); + + const int32_t * opts = (const int32_t *) dst->op_params; + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; + + std::vector kernel_dims = { k1, k0 }; + std::vector stride_dims = { s1, s0 }; + std::vector padding_avg_dims = { p1, p0 }; // (padH, padW) + + auto * kernel_size = aclCreateIntArray(kernel_dims.data(), 2); + auto * strides = aclCreateIntArray(stride_dims.data(), 2); + auto * paddings_avg = aclCreateIntArray(padding_avg_dims.data(), 2); + + bool ceil_mode = false; + bool count_include_pad = true; + int64_t divisor_override = 0; + int8_t cube_math_type = 0; #ifdef ASCEND_310P cube_math_type = 1; #endif - GGML_CANN_CALL_ACLNN_OP(ctx, AvgPool2d, acl_src, kernel_size, strides, paddings_avg, - ceil_mode, count_include_pad, divisor_override, - cube_math_type, acl_dst); - ggml_cann_release_resources(ctx, acl_src, acl_dst, kernel_size, strides, - paddings_avg); + GGML_CANN_CALL_ACLNN_OP(ctx, AvgPool2d, acl_src, kernel_size, strides, paddings_avg, ceil_mode, count_include_pad, + divisor_override, cube_math_type, acl_dst); + ggml_cann_release_resources(ctx, acl_src, acl_dst, kernel_size, strides, paddings_avg); } /** @@ -667,68 +664,61 @@ static void ggml_cann_avg_pool2d(ggml_backend_cann_context& ctx, * @param dst The destination tensor where the result will be stored. The source * tensor is referenced by `dst->src[0]`. 
*/ -static void ggml_cann_max_pool2d(ggml_backend_cann_context& ctx, - ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +static void ggml_cann_max_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); - aclTensor* acl_src = - ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - aclTensor* acl_dst = - ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); + aclTensor * acl_src = ggml_cann_create_tensor(src, nullptr, nullptr, 0, ACL_FORMAT_NCHW); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, nullptr, nullptr, 0, ACL_FORMAT_NCHW); - const int32_t* opts = (const int32_t*)dst->op_params; - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; + const int32_t * opts = (const int32_t *) dst->op_params; + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; - int64_t temp_ne[] = {src->ne[0] + p0 * 2, src->ne[1] + p1 * 2, src->ne[2], - src->ne[3]}; - size_t temp_nb[GGML_MAX_DIMS]; + int64_t temp_ne[] = { src->ne[0] + p0 * 2, src->ne[1] + p1 * 2, src->ne[2], src->ne[3] }; + size_t temp_nb[GGML_MAX_DIMS]; temp_nb[0] = ggml_element_size(src); for (int i = 1; i < GGML_MAX_DIMS; i++) { temp_nb[i] = temp_nb[i - 1] * temp_ne[i - 1]; } - ggml_cann_pool_alloc temp_buffer_allocator( - ctx.pool(), ggml_nbytes(src) + p0 * 2 + p1 * 2 * src->nb[1]); - void* buffer = temp_buffer_allocator.get(); - aclTensor* tmp_tensor = ggml_cann_create_tensor( - buffer, ACL_FLOAT, ggml_element_size(src), temp_ne, temp_nb, - GGML_MAX_DIMS, ACL_FORMAT_NCHW); + ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), ggml_nbytes(src) + p0 * 2 + p1 * 2 * src->nb[1]); + void * buffer = temp_buffer_allocator.get(); + aclTensor * tmp_tensor = ggml_cann_create_tensor(buffer, ACL_FLOAT, ggml_element_size(src), temp_ne, temp_nb, + GGML_MAX_DIMS, ACL_FORMAT_NCHW); // pad: see padding in ggml_cann_pad() - int64_t paddings[] = {p0, p0, p1, p1, 0, 0, 0, 0}; - float value = -FLT_MAX; + int64_t paddings[] = { p0, p0, p1, p1, 0, 0, 0, 0 }; + float value = -FLT_MAX; aclnn_pad(ctx, acl_src, tmp_tensor, paddings, value); // max_pool - std::vector kernel_dims = {k1, k0}; - std::vector stride_dims = {s1, s0}; + std::vector kernel_dims = { k1, k0 }; + std::vector stride_dims = { s1, s0 }; // padding_max_dims: [dim0_start, dim0_end, dim1_start, dim1_end] - std::vector padding_max_dims = {0, 0, 0, 0}; - std::vector dilation_size = {1, 1}; - auto* kernel_size = aclCreateIntArray(kernel_dims.data(), 2); - auto* strides = aclCreateIntArray(stride_dims.data(), 2); - auto* paddings_max = aclCreateIntArray(padding_max_dims.data(), 4); - auto* dilations = aclCreateIntArray(dilation_size.data(), 2); - - bool ceil_mode = false; + std::vector padding_max_dims = { 0, 0, 0, 0 }; + std::vector dilation_size = { 1, 1 }; + auto * kernel_size = aclCreateIntArray(kernel_dims.data(), 2); + auto * strides = aclCreateIntArray(stride_dims.data(), 2); + auto * paddings_max = aclCreateIntArray(padding_max_dims.data(), 4); + auto * dilations = aclCreateIntArray(dilation_size.data(), 2); + + bool ceil_mode = false; int64_t auto_pads = 0; - GGML_CANN_CALL_ACLNN_OP(ctx, MaxPool, tmp_tensor, kernel_size, strides, auto_pads, - paddings_max, dilations, ceil_mode, acl_dst); - ggml_cann_release_resources(ctx, acl_src, 
acl_dst, tmp_tensor, kernel_size, - strides, paddings_max, dilations); + GGML_CANN_CALL_ACLNN_OP(ctx, MaxPool, tmp_tensor, kernel_size, strides, auto_pads, paddings_max, dilations, + ceil_mode, acl_dst); + ggml_cann_release_resources(ctx, acl_src, acl_dst, tmp_tensor, kernel_size, strides, paddings_max, dilations); } -void ggml_cann_pool2d(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - const int32_t* opts = (const int32_t*)dst->op_params; - enum ggml_op_pool op = static_cast(opts[0]); +void ggml_cann_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + const int32_t * opts = (const int32_t *) dst->op_params; + enum ggml_op_pool op = static_cast(opts[0]); switch (op) { case GGML_OP_POOL_AVG: ggml_cann_avg_pool2d(ctx, dst); @@ -752,17 +742,16 @@ void ggml_cann_pool2d(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param acl_src The source tensor from which data will be copied. * @param acl_dst The destination tensor where the data will be copied to. */ -static void cann_copy(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst) { +static void cann_copy(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceCopy, acl_dst, acl_src); } -void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; +void ggml_cann_dup(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; if (ggml_are_same_shape(src0, dst)) { - aclTensor* acl_src = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); if (dst->type == src0->type) { cann_copy(ctx, acl_src, acl_dst); } else { @@ -770,22 +759,20 @@ void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst) { } ggml_cann_release_resources(ctx, acl_src, acl_dst); } else { - void* src_trans_buffer = src0->data; + void * src_trans_buffer = src0->data; ggml_cann_pool_alloc src_buffer_allocator; if (!ggml_is_contiguous(src0)) { - aclTensor* acl_src = ggml_cann_create_tensor(src0); - src_buffer_allocator.alloc(ctx.pool(), - ggml_nelements(src0) * ggml_type_size(src0->type)); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + src_buffer_allocator.alloc(ctx.pool(), ggml_nelements(src0) * ggml_type_size(src0->type)); src_trans_buffer = src_buffer_allocator.get(); size_t src_trans_nb[GGML_MAX_DIMS]; src_trans_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; } - aclTensor* src_trans_tensor = ggml_cann_create_tensor( - src_trans_buffer, ggml_cann_type_mapping(src0->type), - ggml_type_size(src0->type), src0->ne, src_trans_nb, - GGML_MAX_DIMS); + aclTensor * src_trans_tensor = + ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(src0->type), + ggml_type_size(src0->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); cann_copy(ctx, acl_src, src_trans_tensor); ggml_cann_release_resources(ctx, acl_src, src_trans_tensor); } @@ -796,10 +783,10 @@ void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst) { src_reshape_nb[i] = src_reshape_nb[i - 1] * dst->ne[i - 1]; } - aclTensor* trans_acl_src = ggml_cann_create_tensor(src_trans_buffer, - ggml_cann_type_mapping(src0->type),ggml_type_size(src0->type), - dst->ne, src_reshape_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * trans_acl_src = + 
ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), + dst->ne, src_reshape_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); if (dst->type == src0->type) { cann_copy(ctx, trans_acl_src, acl_dst); @@ -827,17 +814,20 @@ void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param type_size The size of each element in the tensor data type. * @return An ACL tensor initialized with zeros. */ -static aclTensor* aclnn_zero(ggml_backend_cann_context& ctx, void* buffer, - size_t n_bytes, int64_t* ne, int64_t dims, - aclDataType type, size_t type_size) { +static aclTensor * aclnn_zero(ggml_backend_cann_context & ctx, + void * buffer, + size_t n_bytes, + int64_t * ne, + int64_t dims, + aclDataType type, + size_t type_size) { size_t nb[GGML_MAX_DIMS]; nb[0] = type_size; for (int i = 1; i < dims; i++) { nb[i] = nb[i - 1] * ne[i - 1]; } - aclTensor* zero = - ggml_cann_create_tensor(buffer, type, type_size, ne, nb, dims); + aclTensor * zero = ggml_cann_create_tensor(buffer, type, type_size, ne, nb, dims); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceZero, zero); return zero; GGML_UNUSED(n_bytes); @@ -861,15 +851,18 @@ static aclTensor* aclnn_zero(ggml_backend_cann_context& ctx, void* buffer, * is 1.0). * @return An ACL tensor initialized with value. */ -static aclTensor* aclnn_values(ggml_backend_cann_context& ctx, void* buffer, - size_t n_bytes, int64_t* ne, int64_t dims, - aclDataType type, size_t type_size, - float value = 1.0f) { - aclTensor* acl_tensor = - aclnn_zero(ctx, buffer, n_bytes, ne, dims, type, type_size); - float alpha_host = 1.0f; - aclScalar* alpha = aclCreateScalar(&alpha_host, aclDataType::ACL_FLOAT); - aclScalar* other = aclCreateScalar(&value, aclDataType::ACL_FLOAT); +static aclTensor * aclnn_values(ggml_backend_cann_context & ctx, + void * buffer, + size_t n_bytes, + int64_t * ne, + int64_t dims, + aclDataType type, + size_t type_size, + float value = 1.0f) { + aclTensor * acl_tensor = aclnn_zero(ctx, buffer, n_bytes, ne, dims, type, type_size); + float alpha_host = 1.0f; + aclScalar * alpha = aclCreateScalar(&alpha_host, aclDataType::ACL_FLOAT); + aclScalar * other = aclCreateScalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdds, acl_tensor, other, alpha); return acl_tensor; } @@ -884,8 +877,7 @@ static aclTensor* aclnn_values(ggml_backend_cann_context& ctx, void* buffer, * @param scalar The scalar value used to fill the tensor. * @param acl_dst The destination tensor to be filled with the scalar value. */ -static void aclnn_fill_scalar(ggml_backend_cann_context& ctx, float scalar, - aclTensor* acl_dst) { +static void aclnn_fill_scalar(ggml_backend_cann_context & ctx, float scalar, aclTensor * acl_dst) { auto acl_scalar = aclCreateScalar(&scalar, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceFillScalar, acl_dst, acl_scalar); ggml_cann_release_resources(ctx, acl_scalar); @@ -913,15 +905,14 @@ static void aclnn_fill_scalar(ggml_backend_cann_context& ctx, float scalar, * initialization via memset or arbitrary values via fill_scalar). * @return An aclTensor pointer created from the cached buffer. 
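The nb arrays built in these helpers all follow the same contiguous-stride pattern, nb[0] equal to the element size and nb[i] = nb[i-1] * ne[i-1]. A standalone illustration with invented extents and an assumed F32 element size:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    const int     dims      = 4;
    const size_t  type_size = sizeof(float);      // e.g. F32
    const int64_t ne[dims]  = { 64, 8, 4, 2 };    // invented extents

    size_t nb[dims];
    nb[0] = type_size;                            // innermost stride is the element size
    for (int i = 1; i < dims; i++) {
        nb[i] = nb[i - 1] * (size_t) ne[i - 1];   // each outer stride is the previous plane size
    }
    for (int i = 0; i < dims; i++) {
        printf("nb[%d] = %zu bytes\n", i, nb[i]);
    }
    return 0;
}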
*/ -static aclTensor* get_cache_acl_tensor( - ggml_backend_cann_context& ctx, - void** buffer, - int64_t &cache_element, - int64_t* ne, - size_t* nb, - ggml_type dtype, - int64_t dims, - float value) { +static aclTensor * get_cache_acl_tensor(ggml_backend_cann_context & ctx, + void ** buffer, + int64_t & cache_element, + int64_t * ne, + size_t * nb, + ggml_type dtype, + int64_t dims, + float value) { // Calculate total number of elements int64_t n_element = 1; for (int i = 0; i < dims; i++) { @@ -940,24 +931,22 @@ static aclTensor* get_cache_acl_tensor( cache_element = n_element; // Initialize cache - int64_t pool_ne[1] = { n_element }; - size_t pool_nb[1] = { ggml_type_size(dtype) }; - aclTensor* acl_value = ggml_cann_create_tensor( - *buffer, ggml_cann_type_mapping(dtype), ggml_type_size(dtype), - pool_ne, pool_nb, 1); + int64_t pool_ne[1] = { n_element }; + size_t pool_nb[1] = { ggml_type_size(dtype) }; + aclTensor * acl_value = + ggml_cann_create_tensor(*buffer, ggml_cann_type_mapping(dtype), ggml_type_size(dtype), pool_ne, pool_nb, 1); aclnn_fill_scalar(ctx, value, acl_value); ggml_cann_release_resources(ctx, acl_value); } - return ggml_cann_create_tensor(*buffer, ggml_cann_type_mapping(dtype), - ggml_type_size(dtype), ne, nb, dims); + return ggml_cann_create_tensor(*buffer, ggml_cann_type_mapping(dtype), ggml_type_size(dtype), ne, nb, dims); } -void ggml_cann_rms_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_rms_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -969,61 +958,50 @@ void ggml_cann_rms_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst) { for (int i = 1; i < GGML_MAX_DIMS; i++) { acl_gamma_nb[i] = acl_gamma_nb[i - 1] * src->ne[i - 1]; } - aclTensor* acl_gamma = get_cache_acl_tensor( - ctx, - &ctx.rms_norm_one_tensor_cache.cache, - ctx.rms_norm_one_tensor_cache.size, - src->ne, - acl_gamma_nb, - dst->type, - 1, // dims - 1.0f // value + aclTensor * acl_gamma = get_cache_acl_tensor(ctx, &ctx.rms_norm_one_tensor_cache.cache, + ctx.rms_norm_one_tensor_cache.size, src->ne, acl_gamma_nb, dst->type, + 1, // dims + 1.0f // value ); // build rstd. - int64_t acl_rstd_ne[] = {src->ne[1], src->ne[2], src->ne[3]}; - size_t acl_rstd_nb[GGML_MAX_DIMS - 1]; + int64_t acl_rstd_ne[] = { src->ne[1], src->ne[2], src->ne[3] }; + size_t acl_rstd_nb[GGML_MAX_DIMS - 1]; // rstd will always be F32. acl_rstd_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { acl_rstd_nb[i] = acl_rstd_nb[i - 1] * acl_rstd_ne[i - 1]; } - aclTensor* acl_rstd = get_cache_acl_tensor( - ctx, - &ctx.rms_norm_zero_tensor_cache.cache, - ctx.rms_norm_zero_tensor_cache.size, - acl_rstd_ne, - acl_rstd_nb, - GGML_TYPE_F32, - GGML_MAX_DIMS - 1, - 0.0f // value - ); + aclTensor * acl_rstd = + get_cache_acl_tensor(ctx, &ctx.rms_norm_zero_tensor_cache.cache, ctx.rms_norm_zero_tensor_cache.size, + acl_rstd_ne, acl_rstd_nb, GGML_TYPE_F32, GGML_MAX_DIMS - 1, + 0.0f // value + ); GGML_CANN_CALL_ACLNN_OP(ctx, RmsNorm, acl_src, acl_gamma, eps, acl_dst, acl_rstd); ggml_cann_release_resources(ctx, acl_src, acl_dst, acl_gamma, acl_rstd); } // TODO: performace is low. 
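A rough CPU analogue of the grow-only caching used for the constant gamma and rstd tensors above, with std::vector standing in for the pooled device allocation (a sketch of the idea, not the backend API):

#include <cstdint>
#include <cstdio>
#include <vector>

// grow-only cache of a constant-filled buffer: reallocate and refill only
// when a larger element count is requested, otherwise hand back the
// existing storage
struct const_cache {
    std::vector<float> buf;     // stands in for the pooled device allocation
    int64_t            cached = 0;

    const float * get(int64_t n_element, float value) {
        if (cached < n_element) {
            buf.assign((size_t) n_element, value);
            cached = n_element;
        }
        return buf.data();
    }
};

int main() {
    const_cache ones;
    ones.get(128, 1.0f);  // allocates and fills 128 elements
    ones.get(64, 1.0f);   // served from the existing buffer, no refill
    printf("cached elements: %lld\n", (long long) ones.cached);
    return 0;
}

Smaller requests simply reinterpret the front of the existing buffer through the caller's ne/nb view, which is why only the largest element count served so far needs to be tracked.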
-void ggml_cann_diag_mask(ggml_backend_cann_context& ctx, ggml_tensor* dst, - float value) { - ggml_tensor* src = dst->src[0]; +void ggml_cann_diag_mask(ggml_backend_cann_context & ctx, ggml_tensor * dst, float value) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - const int n_past = ((int32_t*)dst->op_params)[0]; + const int n_past = ((int32_t *) dst->op_params)[0]; ggml_cann_pool_alloc one_tensor_allocator(ctx.pool(), ggml_nbytes(src)); - void* buffer = one_tensor_allocator.get(); + void * buffer = one_tensor_allocator.get(); - aclTensor* mask_tensor = ggml_cann_create_tensor(buffer, ggml_cann_type_mapping(src->type), - ggml_type_size(src->type), src->ne, src->nb, GGML_MAX_DIMS); + aclTensor * mask_tensor = ggml_cann_create_tensor(buffer, ggml_cann_type_mapping(src->type), + ggml_type_size(src->type), src->ne, src->nb, GGML_MAX_DIMS); aclnn_fill_scalar(ctx, value, mask_tensor); - aclScalar* alpha = nullptr; - float alphaValue = 1.0f; - alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + aclScalar * alpha = nullptr; + float alphaValue = 1.0f; + alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceTriu, mask_tensor, n_past + 1); GGML_CANN_CALL_ACLNN_OP(ctx, Tril, acl_src, n_past + 1, acl_dst); @@ -1046,25 +1024,27 @@ void ggml_cann_diag_mask(ggml_backend_cann_context& ctx, ggml_tensor* dst, * tensor. * @param dims The number of dimensions in the tensor. */ -static void aclnn_permute(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst, int64_t* new_dim, uint64_t dims) { - aclIntArray* acl_dims = aclCreateIntArray(new_dim, dims); +static void aclnn_permute(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + int64_t * new_dim, + uint64_t dims) { + aclIntArray * acl_dims = aclCreateIntArray(new_dim, dims); GGML_CANN_CALL_ACLNN_OP(ctx, Permute, acl_src, acl_dims, acl_dst); ggml_cann_release_resources(ctx, acl_dims); } -static void ggml_cann_im2col_2d_post_process(ggml_backend_cann_context& ctx, - ggml_tensor* dst, - ggml_tensor* src1, - aclTensor* tmp_cast_tensor, - aclTensor* tmp_im2col_tensor) { +static void ggml_cann_im2col_2d_post_process(ggml_backend_cann_context & ctx, + ggml_tensor * dst, + ggml_tensor * src1, + aclTensor * tmp_cast_tensor, + aclTensor * tmp_im2col_tensor) { // Permute: [N, IC * KH * KW, OW * OH] -> [N, OW * OH, IC * KH * KW] - int64_t dst_ne[] = {dst->ne[0], dst->ne[1] * dst->ne[2], dst->ne[3]}; - size_t dst_nb[] = {dst->nb[0], dst->nb[1], dst->nb[3]}; - aclTensor* acl_dst = - ggml_cann_create_tensor(dst, dst_ne, dst_nb, GGML_MAX_DIMS - 1); + int64_t dst_ne[] = { dst->ne[0], dst->ne[1] * dst->ne[2], dst->ne[3] }; + size_t dst_nb[] = { dst->nb[0], dst->nb[1], dst->nb[3] }; + aclTensor * acl_dst = ggml_cann_create_tensor(dst, dst_ne, dst_nb, GGML_MAX_DIMS - 1); - int64_t permute_dim[] = {0, 2, 1}; + int64_t permute_dim[] = { 0, 2, 1 }; if (src1->type != dst->type) { aclnn_permute(ctx, tmp_cast_tensor, acl_dst, permute_dim, 3); } else { @@ -1074,101 +1054,95 @@ static void ggml_cann_im2col_2d_post_process(ggml_backend_cann_context& ctx, ggml_cann_release_resources(ctx, acl_dst); } -static void ggml_cann_im2col_1d_post_process( - ggml_backend_cann_context& ctx, ggml_tensor* dst, ggml_tensor* src1, - aclTensor* tmp_cast_tensor, aclTensor* tmp_im2col_tensor, - const 
std::vector& im2col_op_params) { +static void ggml_cann_im2col_1d_post_process(ggml_backend_cann_context & ctx, + ggml_tensor * dst, + ggml_tensor * src1, + aclTensor * tmp_cast_tensor, + aclTensor * tmp_im2col_tensor, + const std::vector & im2col_op_params) { // get params - const int64_t KH = im2col_op_params[0]; - const int64_t KW = im2col_op_params[1]; - const int64_t IW = im2col_op_params[2]; - const int64_t IC = im2col_op_params[3]; - const int64_t N = im2col_op_params[4]; - const int64_t OH = im2col_op_params[5]; - const int64_t OW = im2col_op_params[6]; - const int64_t s0 = im2col_op_params[7]; - const int64_t p0 = im2col_op_params[8]; - const int64_t d0 = im2col_op_params[9]; + const int64_t KH = im2col_op_params[0]; + const int64_t KW = im2col_op_params[1]; + const int64_t IW = im2col_op_params[2]; + const int64_t IC = im2col_op_params[3]; + const int64_t N = im2col_op_params[4]; + const int64_t OH = im2col_op_params[5]; + const int64_t OW = im2col_op_params[6]; + const int64_t s0 = im2col_op_params[7]; + const int64_t p0 = im2col_op_params[8]; + const int64_t d0 = im2col_op_params[9]; const int64_t n_bytes_factor = im2col_op_params[10]; // Permute: [N, IC * KH * KW, OW * OH] -> // [N, OW * OH * n_bytes_factor, IC * KH * KW] ggml_cann_pool_alloc tmp_permute_allocator(ctx.pool()); tmp_permute_allocator.alloc(ggml_nbytes(dst) * n_bytes_factor); - void* tmp_permute_buffer = tmp_permute_allocator.get(); + void * tmp_permute_buffer = tmp_permute_allocator.get(); - int64_t tmp_permute_ne[] = {IC * KH * KW, OW * OH * n_bytes_factor, N}; - size_t tmp_permute_nb[GGML_MAX_DIMS - 1]; + int64_t tmp_permute_ne[] = { IC * KH * KW, OW * OH * n_bytes_factor, N }; + size_t tmp_permute_nb[GGML_MAX_DIMS - 1]; tmp_permute_nb[0] = ggml_type_size(dst->type); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { tmp_permute_nb[i] = tmp_permute_nb[i - 1] * tmp_permute_ne[i - 1]; } - aclTensor* tmp_permute_tensor = ggml_cann_create_tensor( - tmp_permute_buffer, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), tmp_permute_ne, tmp_permute_nb, - GGML_MAX_DIMS - 1, ACL_FORMAT_ND); + aclTensor * tmp_permute_tensor = + ggml_cann_create_tensor(tmp_permute_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + tmp_permute_ne, tmp_permute_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); - int64_t permute_dim[] = {0, 2, 1}; + int64_t permute_dim[] = { 0, 2, 1 }; if (src1->type != dst->type) { aclnn_permute(ctx, tmp_cast_tensor, tmp_permute_tensor, permute_dim, 3); } else { - aclnn_permute(ctx, tmp_im2col_tensor, tmp_permute_tensor, permute_dim, - 3); + aclnn_permute(ctx, tmp_im2col_tensor, tmp_permute_tensor, permute_dim, 3); } // number of times the kernel moves in W dimension const int n_step_w = (IW + 2 * p0 - d0 * (KW - 1) - 1) / s0 + 1; - size_t offset; - void *cur_dst_buffer = dst->data, *cur_permute_buffer = tmp_permute_buffer; + size_t offset; + void * cur_dst_buffer = dst->data, *cur_permute_buffer = tmp_permute_buffer; // memory copy with offset to restore 1D im2col from 2d if (IC > 1) { - offset = IC * KH * KW * n_step_w * ggml_type_size(dst->type); + offset = IC * KH * KW * n_step_w * ggml_type_size(dst->type); size_t size_cpy = KH * KW * ggml_type_size(dst->type); for (int c = 0; c < IC; c++) { - cur_permute_buffer = (char*)tmp_permute_buffer + offset + - KH * KW * c * ggml_type_size(dst->type); - cur_dst_buffer = (char*)dst->data + - c * KH * KW * n_step_w * ggml_type_size(dst->type); + cur_permute_buffer = (char *) tmp_permute_buffer + offset + KH * KW * c * ggml_type_size(dst->type); 
+ cur_dst_buffer = (char *) dst->data + c * KH * KW * n_step_w * ggml_type_size(dst->type); for (int i = 0; i < n_step_w; i++) { - ggml_cann_async_memcpy(ctx, cur_dst_buffer, cur_permute_buffer, size_cpy, - ACL_MEMCPY_DEVICE_TO_DEVICE); - cur_dst_buffer = - (char*)cur_dst_buffer + KH * KW * ggml_type_size(dst->type); - cur_permute_buffer = (char*)cur_permute_buffer + - KH * KW * IC * ggml_type_size(dst->type); + ggml_cann_async_memcpy(ctx, cur_dst_buffer, cur_permute_buffer, size_cpy, ACL_MEMCPY_DEVICE_TO_DEVICE); + cur_dst_buffer = (char *) cur_dst_buffer + KH * KW * ggml_type_size(dst->type); + cur_permute_buffer = (char *) cur_permute_buffer + KH * KW * IC * ggml_type_size(dst->type); } } } else { - offset = KH * KW * n_step_w * - ggml_type_size(dst->type); // equal to ggml_nbytes(dst) - ggml_cann_async_memcpy(ctx, dst->data, (char*)tmp_permute_buffer + offset, offset, - ACL_MEMCPY_DEVICE_TO_DEVICE); + offset = KH * KW * n_step_w * ggml_type_size(dst->type); // equal to ggml_nbytes(dst) + ggml_cann_async_memcpy(ctx, dst->data, (char *) tmp_permute_buffer + offset, offset, + ACL_MEMCPY_DEVICE_TO_DEVICE); } ggml_cann_release_resources(ctx, tmp_permute_tensor); } -void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; // kernel - ggml_tensor* src1 = dst->src[1]; // input +void ggml_cann_im2col(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; // kernel + ggml_tensor * src1 = dst->src[1]; // input GGML_TENSOR_BINARY_OP_LOCALS; // aclnnIm2col only works on 2D. set s1, p1, d1 to 1 to perform 2D // im2col and do post-processing to restore it to 1D. - const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t s1 = is_2D ? ((const int32_t*)(dst->op_params))[1] : 1; - const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; - const int32_t p1 = is_2D ? ((const int32_t*)(dst->op_params))[3] : 1; - const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; - const int32_t d1 = is_2D ? ((const int32_t*)(dst->op_params))[5] : 1; - - const int64_t N = ne13; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = is_2D ? ((const int32_t *) (dst->op_params))[1] : 1; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = is_2D ? ((const int32_t *) (dst->op_params))[3] : 1; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = is_2D ? ((const int32_t *) (dst->op_params))[5] : 1; + + const int64_t N = ne13; const int64_t IC = ne12; const int64_t KH = ne01; const int64_t KW = ne00; @@ -1181,9 +1155,9 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { const int64_t n_bytes_factor = is_2D ? 1 : 3; // im2col: [N,C,H,W] -> [N, IC * KH * KW, OW * OH * n_bytes_factor] - aclTensor* acl_src1 = ggml_cann_create_tensor(src1); - int64_t tmp_im2col_ne[] = {OW * OH * n_bytes_factor, IC * KH * KW, N}; - size_t tmp_im2col_nb[GGML_MAX_DIMS - 1]; + aclTensor * acl_src1 = ggml_cann_create_tensor(src1); + int64_t tmp_im2col_ne[] = { OW * OH * n_bytes_factor, IC * KH * KW, N }; + size_t tmp_im2col_nb[GGML_MAX_DIMS - 1]; tmp_im2col_nb[0] = ggml_type_size(src1->type); for (int i = 1; i < GGML_MAX_DIMS - 1; i++) { @@ -1193,31 +1167,27 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { // Calculate im2col. 
// If dst is f16, tmp_buffer is f32, we need alloc src.typesize * // dst.elemcount. - ggml_cann_pool_alloc im2col_allocator( - ctx.pool(), - ggml_nelements(dst) * ggml_element_size(src1) * n_bytes_factor); - void* tmp_im2col_buffer = im2col_allocator.get(); - - aclTensor* tmp_im2col_tensor = ggml_cann_create_tensor( - tmp_im2col_buffer, ggml_cann_type_mapping(src1->type), - ggml_type_size(src1->type), tmp_im2col_ne, tmp_im2col_nb, - GGML_MAX_DIMS - 1, ACL_FORMAT_ND); - - std::vector kernel_dims = {KH, KW}; - std::vector dilation_size = {d1, d0}; - std::vector padding_dims = {p1, p0}; - std::vector stride_dims = {s1, s0}; - auto* kernel_size = aclCreateIntArray(kernel_dims.data(), 2); - auto* dilations = aclCreateIntArray(dilation_size.data(), 2); - auto* paddings = aclCreateIntArray(padding_dims.data(), 2); - auto* strides = aclCreateIntArray(stride_dims.data(), 2); - GGML_CANN_CALL_ACLNN_OP(ctx, Im2col, acl_src1, kernel_size, dilations, - paddings, strides, tmp_im2col_tensor); + ggml_cann_pool_alloc im2col_allocator(ctx.pool(), ggml_nelements(dst) * ggml_element_size(src1) * n_bytes_factor); + void * tmp_im2col_buffer = im2col_allocator.get(); + + aclTensor * tmp_im2col_tensor = + ggml_cann_create_tensor(tmp_im2col_buffer, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), + tmp_im2col_ne, tmp_im2col_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); + + std::vector kernel_dims = { KH, KW }; + std::vector dilation_size = { d1, d0 }; + std::vector padding_dims = { p1, p0 }; + std::vector stride_dims = { s1, s0 }; + auto * kernel_size = aclCreateIntArray(kernel_dims.data(), 2); + auto * dilations = aclCreateIntArray(dilation_size.data(), 2); + auto * paddings = aclCreateIntArray(padding_dims.data(), 2); + auto * strides = aclCreateIntArray(stride_dims.data(), 2); + GGML_CANN_CALL_ACLNN_OP(ctx, Im2col, acl_src1, kernel_size, dilations, paddings, strides, tmp_im2col_tensor); // Cast if dst is f16. 
- aclTensor* tmp_cast_tensor = nullptr; + aclTensor * tmp_cast_tensor = nullptr; ggml_cann_pool_alloc tmp_cast_allocator(ctx.pool()); - void* tmp_cast_buffer = nullptr; + void * tmp_cast_buffer = nullptr; if (src1->type != dst->type) { tmp_cast_allocator.alloc(ggml_nbytes(dst) * n_bytes_factor); tmp_cast_buffer = tmp_cast_allocator.get(); @@ -1227,26 +1197,22 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { temp_cast_nb[i] = temp_cast_nb[i - 1] * tmp_im2col_ne[i - 1]; } - tmp_cast_tensor = ggml_cann_create_tensor( - tmp_cast_buffer, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), tmp_im2col_ne, temp_cast_nb, - GGML_MAX_DIMS - 1, ACL_FORMAT_ND); + tmp_cast_tensor = + ggml_cann_create_tensor(tmp_cast_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + tmp_im2col_ne, temp_cast_nb, GGML_MAX_DIMS - 1, ACL_FORMAT_ND); aclnn_cast(ctx, tmp_im2col_tensor, tmp_cast_tensor, ggml_cann_type_mapping(dst->type)); } // post-processing if (is_2D) { - ggml_cann_im2col_2d_post_process(ctx, dst, src1, tmp_cast_tensor, - tmp_im2col_tensor); + ggml_cann_im2col_2d_post_process(ctx, dst, src1, tmp_cast_tensor, tmp_im2col_tensor); } else { - std::vector im2col_op_params = { - KH, KW, IW, IC, N, OH, OW, s0, p0, d0, n_bytes_factor}; - ggml_cann_im2col_1d_post_process(ctx, dst, src1, tmp_cast_tensor, - tmp_im2col_tensor, im2col_op_params); + std::vector im2col_op_params = { KH, KW, IW, IC, N, OH, OW, s0, p0, d0, n_bytes_factor }; + ggml_cann_im2col_1d_post_process(ctx, dst, src1, tmp_cast_tensor, tmp_im2col_tensor, im2col_op_params); } - ggml_cann_release_resources(ctx, acl_src1, tmp_im2col_tensor, tmp_cast_tensor, - kernel_size, dilations, paddings, strides); + ggml_cann_release_resources(ctx, acl_src1, tmp_im2col_tensor, tmp_cast_tensor, kernel_size, dilations, paddings, + strides); } /** @@ -1262,136 +1228,123 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param ctx The context for the CANN backend operations. * @param acl_src The tensor on which the exponential function will be applied. 
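To make the im2col shapes concrete, a standalone sketch with invented sizes; the window-count expression is the same one the 1D post-processing uses for n_step_w:

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t IW = 16, IC = 4, N = 1;   // input width, channels, batch (invented)
    const int64_t KW = 3, KH = 1;           // kernel extents
    const int64_t s0 = 1, p0 = 1, d0 = 1;   // stride, padding, dilation along W

    // number of window positions along W; the same expression is used for n_step_w
    const int64_t OW = (IW + 2 * p0 - d0 * (KW - 1) - 1) / s0 + 1;
    const int64_t OH = 1;                   // single output row in this 1D-style example
    const int64_t cols = IC * KH * KW;      // one im2col column per window position

    printf("OW = %lld, IC*KH*KW = %lld, im2col elements = %lld\n",
           (long long) OW, (long long) cols, (long long) (N * cols * OW * OH));
    return 0;
}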
*/ -static void aclnn_exp(ggml_backend_cann_context& ctx, aclTensor* acl_src) { +static void aclnn_exp(ggml_backend_cann_context & ctx, aclTensor * acl_src) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceExp, acl_src); } -void aclnn_cos(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst) { - if(acl_dst == nullptr) { +void aclnn_cos(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { + if (acl_dst == nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceCos, acl_src); } else { GGML_CANN_CALL_ACLNN_OP(ctx, Cos, acl_src, acl_dst); } } -void aclnn_sin(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst) { - if(acl_dst == nullptr) { +void aclnn_sin(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { + if (acl_dst == nullptr) { GGML_CANN_CALL_ACLNN_OP(ctx, InplaceSin, acl_src); } else { GGML_CANN_CALL_ACLNN_OP(ctx, Sin, acl_src, acl_dst); } } -void ggml_cann_timestep_embedding(ggml_backend_cann_context& ctx, - ggml_tensor* dst) { - const ggml_tensor* src = dst->src[0]; +void ggml_cann_timestep_embedding(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; GGML_ASSERT(src->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); - const int dim = dst->op_params[0]; + const int dim = dst->op_params[0]; const int max_period = dst->op_params[1]; - int half = dim / 2; + int half = dim / 2; - aclTensor* acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_src = ggml_cann_create_tensor(src); // arange: [0, ..., half) - float start = 0; - float stop = half; - float step = 1; + float start = 0; + float stop = half; + float step = 1; int64_t n_elements_arange = half; - int64_t tmp_arange_ne[] = {half}; - size_t tmp_arange_nb[] = {sizeof(dst->type)}; + int64_t tmp_arange_ne[] = { half }; + size_t tmp_arange_nb[] = { sizeof(dst->type) }; ggml_cann_pool_alloc arange_allocator(ctx.pool(), half * sizeof(dst->type)); - void* tmp_arange_buffer = arange_allocator.get(); - aclTensor* tmp_arange_tensor = ggml_cann_create_tensor( - tmp_arange_buffer, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), tmp_arange_ne, tmp_arange_nb, - GGML_MAX_DIMS - 3, ACL_FORMAT_ND); + void * tmp_arange_buffer = arange_allocator.get(); + aclTensor * tmp_arange_tensor = + ggml_cann_create_tensor(tmp_arange_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + tmp_arange_ne, tmp_arange_nb, GGML_MAX_DIMS - 3, ACL_FORMAT_ND); aclnn_arange(ctx, tmp_arange_tensor, start, stop, step, n_elements_arange); // freq float freq_param = -logf(max_period) / half; - bool inplace = true; + bool inplace = true; aclnn_muls(ctx, tmp_arange_tensor, freq_param, nullptr, inplace); aclnn_exp(ctx, tmp_arange_tensor); // permute: src [0,1,2,3]->[0,1,3,2] - int64_t tmp_permute_ne[] = {src->ne[1], src->ne[0], src->ne[2], src->ne[3]}; - size_t tmp_permute_nb[GGML_MAX_DIMS]; + int64_t tmp_permute_ne[] = { src->ne[1], src->ne[0], src->ne[2], src->ne[3] }; + size_t tmp_permute_nb[GGML_MAX_DIMS]; tmp_permute_nb[0] = ggml_type_size(src->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { tmp_permute_nb[i] = tmp_permute_nb[i - 1] * tmp_permute_ne[i - 1]; } ggml_cann_pool_alloc permute_allocator(ctx.pool(), ggml_nbytes(src)); - void* tmp_permute_buffer = permute_allocator.get(); - aclTensor* tmp_permute_tensor = ggml_cann_create_tensor( - tmp_permute_buffer, ggml_cann_type_mapping(src->type), - ggml_type_size(src->type), tmp_permute_ne, tmp_permute_nb, - GGML_MAX_DIMS, ACL_FORMAT_ND); - int64_t permute_dim[] = 
{0, 1, 3, 2}; - int64_t num_dims = 4; + void * tmp_permute_buffer = permute_allocator.get(); + aclTensor * tmp_permute_tensor = + ggml_cann_create_tensor(tmp_permute_buffer, ggml_cann_type_mapping(src->type), ggml_type_size(src->type), + tmp_permute_ne, tmp_permute_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); + int64_t permute_dim[] = { 0, 1, 3, 2 }; + int64_t num_dims = 4; aclnn_permute(ctx, acl_src, tmp_permute_tensor, permute_dim, num_dims); // timestep * freq - int64_t tmp_mul_ne[] = {src->ne[1] * half, src->ne[0], src->ne[2], - src->ne[3]}; - size_t tmp_mul_nb[GGML_MAX_DIMS]; + int64_t tmp_mul_ne[] = { src->ne[1] * half, src->ne[0], src->ne[2], src->ne[3] }; + size_t tmp_mul_nb[GGML_MAX_DIMS]; tmp_mul_nb[0] = ggml_type_size(src->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { tmp_mul_nb[i] = tmp_mul_nb[i - 1] * tmp_mul_ne[i - 1]; } - int mul_nelements = - src->ne[1] * half * src->ne[0] * src->ne[2] * src->ne[3]; + int mul_nelements = src->ne[1] * half * src->ne[0] * src->ne[2] * src->ne[3]; - ggml_cann_pool_alloc mul_allocator( - ctx.pool(), mul_nelements * ggml_type_size(src->type)); - void* tmp_mul_buffer = mul_allocator.get(); - aclTensor* tmp_mul_tensor = ggml_cann_create_tensor( - tmp_mul_buffer, ggml_cann_type_mapping(src->type), - ggml_type_size(src->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, - ACL_FORMAT_ND); + ggml_cann_pool_alloc mul_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); + void * tmp_mul_buffer = mul_allocator.get(); + aclTensor * tmp_mul_tensor = + ggml_cann_create_tensor(tmp_mul_buffer, ggml_cann_type_mapping(src->type), ggml_type_size(src->type), + tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_mul(ctx, tmp_permute_tensor, tmp_arange_tensor, tmp_mul_tensor); // cos - ggml_cann_pool_alloc cos_allocator( - ctx.pool(), mul_nelements * ggml_type_size(src->type)); - void* tmp_cos_buffer = cos_allocator.get(); - aclTensor* tmp_cos_tensor = ggml_cann_create_tensor( - tmp_cos_buffer, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, - ACL_FORMAT_ND); + ggml_cann_pool_alloc cos_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); + void * tmp_cos_buffer = cos_allocator.get(); + aclTensor * tmp_cos_tensor = + ggml_cann_create_tensor(tmp_cos_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_cos(ctx, tmp_mul_tensor, tmp_cos_tensor); // sin - ggml_cann_pool_alloc sin_allocator( - ctx.pool(), mul_nelements * ggml_type_size(src->type)); - void* tmp_sin_buffer = sin_allocator.get(); - aclTensor* tmp_sin_tensor = ggml_cann_create_tensor( - tmp_sin_buffer, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, - ACL_FORMAT_ND); + ggml_cann_pool_alloc sin_allocator(ctx.pool(), mul_nelements * ggml_type_size(src->type)); + void * tmp_sin_buffer = sin_allocator.get(); + aclTensor * tmp_sin_tensor = + ggml_cann_create_tensor(tmp_sin_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + tmp_mul_ne, tmp_mul_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_sin(ctx, tmp_mul_tensor, tmp_sin_tensor); // concat - int64_t concat_dim = 3; - aclTensor* acl_dst = ggml_cann_create_tensor(dst); - aclTensor* tensors[] = {tmp_cos_tensor, tmp_sin_tensor}; - aclTensorList* tensor_list = aclCreateTensorList(tensors, 2); + int64_t concat_dim = 3; + aclTensor * acl_dst = ggml_cann_create_tensor(dst); + aclTensor * tensors[] = { tmp_cos_tensor, tmp_sin_tensor }; + 
aclTensorList * tensor_list = aclCreateTensorList(tensors, 2); aclnn_concat(ctx, tensor_list, acl_dst, concat_dim); // release // segmentation fault when delete both tensorList and his elements. - ggml_cann_release_resources(ctx, tensor_list, acl_src, tmp_arange_tensor, - tmp_permute_tensor, tmp_mul_tensor, acl_dst); + ggml_cann_release_resources(ctx, tensor_list, acl_src, tmp_arange_tensor, tmp_permute_tensor, tmp_mul_tensor, + acl_dst); } /** @@ -1410,8 +1363,7 @@ void ggml_cann_timestep_embedding(ggml_backend_cann_context& ctx, * @param acl_exp The exponent tensor, each element of which is used to raise * the corresponding element in the destination tensor. */ -static void aclnn_pow_tensor_tensor(ggml_backend_cann_context& ctx, - aclTensor* acl_dst, aclTensor* acl_exp) { +static void aclnn_pow_tensor_tensor(ggml_backend_cann_context & ctx, aclTensor * acl_dst, aclTensor * acl_exp) { GGML_CANN_CALL_ACLNN_OP(ctx, InplacePowTensorTensor, acl_dst, acl_exp); } @@ -1436,25 +1388,29 @@ static void aclnn_pow_tensor_tensor(ggml_backend_cann_context& ctx, * @param step Step size for the exponent increment. * @param dtype Data type for slope tensor. */ -static void aclnn_get_slope_inner(ggml_backend_cann_context& ctx, void* slope_buffer, - float m, int64_t size, float start, float stop, float step, ggml_type dtype){ - aclDataType acl_type = ggml_cann_type_mapping(dtype); - size_t type_size = ggml_type_size(dtype); - - int64_t ne[] = {size}; - size_t nb[] = {type_size}; +static void aclnn_get_slope_inner(ggml_backend_cann_context & ctx, + void * slope_buffer, + float m, + int64_t size, + float start, + float stop, + float step, + ggml_type dtype) { + aclDataType acl_type = ggml_cann_type_mapping(dtype); + size_t type_size = ggml_type_size(dtype); + + int64_t ne[] = { size }; + size_t nb[] = { type_size }; ggml_cann_pool_alloc arange_allocator(ctx.pool(), size * type_size); - void* arange_buffer = arange_allocator.get(); + void * arange_buffer = arange_allocator.get(); - aclTensor* arange_tensor = ggml_cann_create_tensor( - arange_buffer, acl_type, type_size, ne, nb, 1); + aclTensor * arange_tensor = ggml_cann_create_tensor(arange_buffer, acl_type, type_size, ne, nb, 1); aclnn_arange(ctx, arange_tensor, start, stop, step, size); - aclTensor* slope_tensor = ggml_cann_create_tensor( - slope_buffer, acl_type, type_size, ne, nb, 1); + aclTensor * slope_tensor = ggml_cann_create_tensor(slope_buffer, acl_type, type_size, ne, nb, 1); - aclScalar* sc = aclCreateScalar(&m, aclDataType::ACL_FLOAT); + aclScalar * sc = aclCreateScalar(&m, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, sc, arange_tensor, slope_tensor); ggml_cann_release_resources(ctx, sc, arange_tensor, slope_tensor); @@ -1486,8 +1442,11 @@ static void aclnn_get_slope_inner(ggml_backend_cann_context& ctx, void* slope_bu * @param dtype Data type for slope tensor. 
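The slope schedule assembled by these helpers (the two-segment m0/m1 form appears just below) can be reproduced on the CPU. An illustrative sketch with example values, mirroring the usual ggml ALiBi formula rather than quoting the backend:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int   n_head   = 12;    // example head count
    const float max_bias = 8.0f;  // example bias

    const int   n_head_log2 = 1 << (int) floorf(log2f((float) n_head));
    const float m0 = powf(2.0f, -max_bias / n_head_log2);
    const float m1 = powf(2.0f, -max_bias / 2.0f / n_head_log2);

    std::vector<float> slope(n_head);
    for (int h = 0; h < n_head; h++) {
        if (h < n_head_log2) {
            slope[h] = powf(m0, (float) (h + 1));                      // exponents 1, 2, 3, ...
        } else {
            slope[h] = powf(m1, (float) (2 * (h - n_head_log2) + 1));  // exponents 1, 3, 5, ...
        }
        printf("head %2d: slope = %g\n", h, slope[h]);
    }
    return 0;
}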
* */ -static void aclnn_get_slope(ggml_backend_cann_context & ctx, int64_t n_head, - void* slope_buffer, float max_bias, ggml_type dtype) { +static void aclnn_get_slope(ggml_backend_cann_context & ctx, + int64_t n_head, + void * slope_buffer, + float max_bias, + ggml_type dtype) { const int n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); float m0 = powf(2.0f, -(max_bias) / n_head_log2); @@ -1511,9 +1470,8 @@ static void aclnn_get_slope(ggml_backend_cann_context & ctx, int64_t n_head, end = 2 * ((n_head - 1) - n_head_log2) + 1; step = 2; count = n_head - n_head_log2; - aclnn_get_slope_inner( - ctx, (char *) slope_buffer + n_head_log2 * sizeof(float), - m1, count, start, end + 1, step, dtype); + aclnn_get_slope_inner(ctx, (char *) slope_buffer + n_head_log2 * sizeof(float), m1, count, start, end + 1, step, + dtype); } } @@ -1538,17 +1496,19 @@ static void aclnn_get_slope(ggml_backend_cann_context & ctx, int64_t n_head, * - Write data into dst_ptr using only the shape information of the dst tensor. * - `GGML_MAX_DIMS + 2` is used to extend tensor dimensions for broadcasting. */ -static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask, - ggml_tensor* dst, void* dst_ptr, float max_bias) { - void* slope_buffer = nullptr; - void* bias_buffer = nullptr; +static void aclnn_add_alibi(ggml_backend_cann_context & ctx, + ggml_tensor * mask, + ggml_tensor * dst, + void * dst_ptr, + float max_bias) { + void * slope_buffer = nullptr; + void * bias_buffer = nullptr; if (max_bias > 0.0f) { - int64_t n_heads = dst->ne[2]; + int64_t n_heads = dst->ne[2]; ggml_cann_pool_alloc slope_allocator(ctx.pool(), n_heads * sizeof(float)); slope_buffer = slope_allocator.get(); - ggml_cann_pool_alloc bias_allocator( - ctx.pool(), ggml_nelements(dst) * ggml_element_size(dst)); + ggml_cann_pool_alloc bias_allocator(ctx.pool(), ggml_nelements(dst) * ggml_element_size(dst)); bias_buffer = bias_allocator.get(); aclnn_get_slope(ctx, n_heads, slope_buffer, max_bias, GGML_TYPE_F32); } @@ -1559,16 +1519,12 @@ static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask, // broadcast the mask across rows int64_t mask_ne[] = { mask->ne[0], dst->ne[1], mask->ne[2], 1, mask->ne[3], 1 }; - size_t mask_nb[] = { - mask_nb[0] = mask->nb[0], mask_nb[1] = mask->nb[1], mask_nb[2] = mask->nb[2], - mask_nb[3] = mask->nb[2], mask_nb[4] = mask->nb[3], mask_nb[5] = mask->nb[3] - }; + size_t mask_nb[] = { mask_nb[0] = mask->nb[0], mask_nb[1] = mask->nb[1], mask_nb[2] = mask->nb[2], + mask_nb[3] = mask->nb[2], mask_nb[4] = mask->nb[3], mask_nb[5] = mask->nb[3] }; int64_t dst_ne[] = { dst->ne[0], dst->ne[1], mask->ne[2], nr2, mask->ne[3], nr3 }; - size_t dst_nb[] = { - dst_nb[0] = dst->nb[0], dst_nb[1] = dst->nb[1], dst_nb[2] = dst->nb[2], - dst_nb[3] = dst->nb[2], dst_nb[4] = dst->nb[3], dst_nb[5] = dst->nb[3] - }; + size_t dst_nb[] = { dst_nb[0] = dst->nb[0], dst_nb[1] = dst->nb[1], dst_nb[2] = dst->nb[2], + dst_nb[3] = dst->nb[2], dst_nb[4] = dst->nb[3], dst_nb[5] = dst->nb[3] }; // slope is a 1 dim tensor, slope.ne2 == dst.ne2 int64_t slope_ne[] = { 1, 1, mask->ne[2], nr2, 1, 1 }; @@ -1578,17 +1534,13 @@ static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask, slope_nb[i] = slope_nb[i - 1] * slope_ne[i - 1]; } - aclTensor* acl_slope = ggml_cann_create_tensor( - slope_buffer, ACL_FLOAT, sizeof(float), - slope_ne, slope_nb, GGML_MAX_DIMS + 2); - aclTensor* acl_mask = ggml_cann_create_tensor( - mask, mask_ne, mask_nb, GGML_MAX_DIMS + 2); + aclTensor * acl_slope = + 
ggml_cann_create_tensor(slope_buffer, ACL_FLOAT, sizeof(float), slope_ne, slope_nb, GGML_MAX_DIMS + 2); + aclTensor * acl_mask = ggml_cann_create_tensor(mask, mask_ne, mask_nb, GGML_MAX_DIMS + 2); // write data into dst_ptr using only the shape information of the dst tensor. - aclTensor* acl_dst = ggml_cann_create_tensor( - dst_ptr, ggml_cann_type_mapping(dst->type), - ggml_type_size(dst->type), dst_ne, dst_nb, - GGML_MAX_DIMS + 2); + aclTensor * acl_dst = ggml_cann_create_tensor(dst_ptr, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), + dst_ne, dst_nb, GGML_MAX_DIMS + 2); if (max_bias > 0.0f) { int64_t bias_ne[] = { mask->ne[0], dst->ne[1], mask->ne[2], nr2, mask->ne[3], 1 }; @@ -1597,9 +1549,8 @@ static void aclnn_add_alibi(ggml_backend_cann_context& ctx, ggml_tensor* mask, for (int i = 1; i < GGML_MAX_DIMS + 2; i++) { bias_nb[i] = bias_nb[i - 1] * bias_ne[i - 1]; } - aclTensor* bias_tensor = ggml_cann_create_tensor( - bias_buffer, ACL_FLOAT, sizeof(float), - bias_ne, bias_nb, GGML_MAX_DIMS + 2); + aclTensor * bias_tensor = + ggml_cann_create_tensor(bias_buffer, ACL_FLOAT, sizeof(float), bias_ne, bias_nb, GGML_MAX_DIMS + 2); aclnn_mul(ctx, acl_slope, acl_mask, bias_tensor); aclnn_add(ctx, acl_dst, bias_tensor); @@ -1628,17 +1579,16 @@ void ggml_cann_cpy(ggml_backend_cann_context & ctx, ggml_tensor * dst) { * @param acl_dst The destination tensor where the softmax results will be * stored. */ -static void aclnn_softmax(ggml_backend_cann_context & ctx, - aclTensor* acl_src, int64_t dim, aclTensor * acl_dst) { +static void aclnn_softmax(ggml_backend_cann_context & ctx, aclTensor * acl_src, int64_t dim, aclTensor * acl_dst) { GGML_CANN_CALL_ACLNN_OP(ctx, Softmax, acl_src, dim, acl_dst); } void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { - ggml_tensor* src0 = dst->src[0]; - ggml_tensor* src1 = dst->src[1]; // mask + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; // mask - aclTensor* acl_src0 = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src0 = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); float scale = 1.0f; float max_bias = 0.0f; @@ -1647,12 +1597,11 @@ void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); // input mul scale - aclScalar* acl_scale = aclCreateScalar(&scale, aclDataType::ACL_FLOAT); + aclScalar * acl_scale = aclCreateScalar(&scale, aclDataType::ACL_FLOAT); ggml_cann_pool_alloc src_tensor_allocator(ctx.pool(), ggml_nbytes(src0)); - void* src_tensor_buffer = src_tensor_allocator.get(); - aclTensor* softmax_tensor = ggml_cann_create_tensor( - src_tensor_buffer, ggml_cann_type_mapping(src0->type), - ggml_element_size(src0), src0->ne, src0->nb,GGML_MAX_DIMS); + void * src_tensor_buffer = src_tensor_allocator.get(); + aclTensor * softmax_tensor = ggml_cann_create_tensor(src_tensor_buffer, ggml_cann_type_mapping(src0->type), + ggml_element_size(src0), src0->ne, src0->nb, GGML_MAX_DIMS); aclnn_muls(ctx, acl_src0, scale, softmax_tensor, false); @@ -1684,29 +1633,31 @@ void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { * @param index The index tensor specifying the indices to select from the source tensor. * @param type The data type of the source and destination tensors. 
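For reference, a plain CPU version of the per-row operation the softmax path above assembles on the device, softmax(x * scale + mask); this is a simplified single-row illustration with the ALiBi bias term omitted:

#include <cmath>
#include <cstdio>

int main() {
    const int n = 4;
    float x[n]    = { 0.5f, 1.0f, -0.5f, 2.0f };
    float mask[n] = { 0.0f, 0.0f, -INFINITY, 0.0f };  // -inf masks a position out
    float scale   = 0.125f;

    float y[n], max_v = -INFINITY, sum = 0.0f;
    for (int i = 0; i < n; i++) {
        y[i] = x[i] * scale + mask[i];
        if (y[i] > max_v) max_v = y[i];
    }
    for (int i = 0; i < n; i++) {
        y[i] = expf(y[i] - max_v);   // subtract the row max for numerical stability
        sum += y[i];
    }
    for (int i = 0; i < n; i++) {
        printf("p[%d] = %f\n", i, y[i] / sum);
    }
    return 0;
}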
*/ -static void aclnn_index_select_4d(ggml_backend_cann_context& ctx, - void* src_buffer,int64_t* src_ne, size_t* src_nb, - void* dst_buffer, int64_t* dst_ne, size_t* dst_nb, - ggml_tensor* index, ggml_type type) { +static void aclnn_index_select_4d(ggml_backend_cann_context & ctx, + void * src_buffer, + int64_t * src_ne, + size_t * src_nb, + void * dst_buffer, + int64_t * dst_ne, + size_t * dst_nb, + ggml_tensor * index, + ggml_type type) { for (int64_t i = 0; i < src_ne[3]; i++) { for (int64_t j = 0; j < src_ne[2]; j++) { // src - aclTensor* acl_src_tensor = ggml_cann_create_tensor( - (char*)src_buffer + i * src_nb[3] + j * src_nb[2], - ggml_cann_type_mapping(type), ggml_type_size(type), - src_ne, src_nb, 2); + aclTensor * acl_src_tensor = + ggml_cann_create_tensor((char *) src_buffer + i * src_nb[3] + j * src_nb[2], + ggml_cann_type_mapping(type), ggml_type_size(type), src_ne, src_nb, 2); // index - aclTensor* acl_index = ggml_cann_create_tensor( - (char*)index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], - ggml_cann_type_mapping(index->type), ggml_element_size(index), - index->ne, index->nb, 1); + aclTensor * acl_index = ggml_cann_create_tensor( + (char *) index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], + ggml_cann_type_mapping(index->type), ggml_element_size(index), index->ne, index->nb, 1); // out - aclTensor* acl_out = ggml_cann_create_tensor( - (char*)dst_buffer + i * dst_nb[3] + j * dst_nb[2], - ggml_cann_type_mapping(type), ggml_type_size(type), - dst_ne, dst_nb, 2); + aclTensor * acl_out = + ggml_cann_create_tensor((char *) dst_buffer + i * dst_nb[3] + j * dst_nb[2], + ggml_cann_type_mapping(type), ggml_type_size(type), dst_ne, dst_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, IndexSelect, acl_src_tensor, 0, acl_index, acl_out); ggml_cann_release_resources(ctx, acl_src_tensor, acl_index, acl_out); } @@ -1733,162 +1684,154 @@ static void aclnn_index_select_4d(ggml_backend_cann_context& ctx, * @param index The index tensor specifying target positions in the destination tensor. * @param type The data type of the source and destination tensors. 
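An illustrative CPU version of the row gather that the per-plane IndexSelect above performs for GET_ROWS, using toy sizes and float rows:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const int64_t row_len = 4;
    // three source rows of length 4
    std::vector<float>   src = { 0, 1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23 };
    std::vector<int32_t> idx = { 2, 0, 2 };  // rows to gather; repeats are allowed

    std::vector<float> dst(idx.size() * row_len);
    for (size_t r = 0; r < idx.size(); r++) {
        for (int64_t c = 0; c < row_len; c++) {
            dst[r * row_len + c] = src[idx[r] * row_len + c];
        }
    }
    printf("first gathered row starts with %g (row 2 of src)\n", dst[0]);
    return 0;
}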
*/ -static void aclnn_index_copy_4d(ggml_backend_cann_context& ctx, - void* src_buffer,int64_t* src_ne, size_t* src_nb, - void* dst_buffer, int64_t* dst_ne, size_t* dst_nb, - ggml_tensor* index, ggml_type type) { +static void aclnn_index_copy_4d(ggml_backend_cann_context & ctx, + void * src_buffer, + int64_t * src_ne, + size_t * src_nb, + void * dst_buffer, + int64_t * dst_ne, + size_t * dst_nb, + ggml_tensor * index, + ggml_type type) { for (int64_t i = 0; i < src_ne[3]; i++) { for (int64_t j = 0; j < src_ne[2]; j++) { // src - aclTensor* acl_src_tensor = ggml_cann_create_tensor( - (char*)src_buffer + i * src_nb[3] + j * src_nb[2], - ggml_cann_type_mapping(type), ggml_type_size(type), - src_ne, src_nb, 2); + aclTensor * acl_src_tensor = + ggml_cann_create_tensor((char *) src_buffer + i * src_nb[3] + j * src_nb[2], + ggml_cann_type_mapping(type), ggml_type_size(type), src_ne, src_nb, 2); // index - aclTensor* acl_index = ggml_cann_create_tensor( - (char*)index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], - ggml_cann_type_mapping(index->type), ggml_element_size(index), - index->ne, index->nb, 1); + aclTensor * acl_index = ggml_cann_create_tensor( + (char *) index->data + (i % index->ne[2]) * index->nb[2] + (j % index->ne[1]) * index->nb[1], + ggml_cann_type_mapping(index->type), ggml_element_size(index), index->ne, index->nb, 1); // out - aclTensor* acl_out = ggml_cann_create_tensor( - (char*)dst_buffer + i * dst_nb[3] + j * dst_nb[2], - ggml_cann_type_mapping(type), ggml_type_size(type), - dst_ne, dst_nb, 2); + aclTensor * acl_out = + ggml_cann_create_tensor((char *) dst_buffer + i * dst_nb[3] + j * dst_nb[2], + ggml_cann_type_mapping(type), ggml_type_size(type), dst_ne, dst_nb, 2); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceIndexCopy, acl_out, 0, acl_index, acl_src_tensor); ggml_cann_release_resources(ctx, acl_src_tensor, acl_index, acl_out); } } } -void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; // src - ggml_tensor* src1 = dst->src[1]; // index +void ggml_cann_get_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; // src + ggml_tensor * src1 = dst->src[1]; // index GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); switch (src0->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: - if(src0->type == dst->type) { - aclnn_index_select_4d(ctx, src0->data, src0->ne, src0->nb, - dst->data, dst->ne, dst->nb, - src1, dst->type); + if (src0->type == dst->type) { + aclnn_index_select_4d(ctx, src0->data, src0->ne, src0->nb, dst->data, dst->ne, dst->nb, src1, + dst->type); } else { - aclTensor* acl_src0 = ggml_cann_create_tensor(src0); - ggml_cann_pool_alloc src_buffer_allocator( - ctx.pool(), ggml_nelements(src0) * ggml_element_size(dst)); - void* src_trans_buffer = src_buffer_allocator.get(); - size_t src_trans_nb[GGML_MAX_DIMS]; + aclTensor * acl_src0 = ggml_cann_create_tensor(src0); + ggml_cann_pool_alloc src_buffer_allocator(ctx.pool(), ggml_nelements(src0) * ggml_element_size(dst)); + void * src_trans_buffer = src_buffer_allocator.get(); + size_t src_trans_nb[GGML_MAX_DIMS]; src_trans_nb[0] = dst->nb[0]; for (int i = 1; i < GGML_MAX_DIMS; i++) { src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; } - aclTensor* src_trans_tensor = ggml_cann_create_tensor( - src_trans_buffer, ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), - src0->ne, src_trans_nb, GGML_MAX_DIMS); + aclTensor * src_trans_tensor = + 
ggml_cann_create_tensor(src_trans_buffer, ggml_cann_type_mapping(dst->type), + ggml_type_size(dst->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src0, src_trans_tensor, ggml_cann_type_mapping(dst->type)); - aclnn_index_select_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, - dst->data, dst->ne, dst->nb, - src1, dst->type); + aclnn_index_select_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, dst->data, dst->ne, dst->nb, src1, + dst->type); ggml_cann_release_resources(ctx, acl_src0, src_trans_tensor); } break; - case GGML_TYPE_Q8_0: { - // add 1 dim for bcast mul. - size_t weight_nb[GGML_MAX_DIMS + 1], scale_nb[GGML_MAX_DIMS + 1], - dequant_nb[GGML_MAX_DIMS + 1]; - int64_t weight_ne[GGML_MAX_DIMS + 1], scale_ne[GGML_MAX_DIMS + 1], - *dequant_ne; - int64_t scale_offset = 0; - // [3,4,5,64] -> [3,4,5,2,32] - weight_ne[0] = QK8_0; - weight_ne[1] = src0->ne[0] / QK8_0; - weight_nb[0] = sizeof(int8_t); - weight_nb[1] = weight_nb[0] * weight_ne[0]; - for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { - weight_ne[i] = src0->ne[i - 1]; - weight_nb[i] = weight_nb[i - 1] * weight_ne[i - 1]; - } - // [3,4,5,64] -> [3,4,5,2,1] - scale_ne[0] = 1; - scale_ne[1] = src0->ne[0] / QK8_0; - scale_nb[0] = sizeof(uint16_t); - scale_nb[1] = scale_nb[0] * scale_ne[0]; - for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { - scale_ne[i] = src0->ne[i - 1]; - scale_nb[i] = scale_nb[i - 1] * scale_ne[i - 1]; - } - // [3,4,5,64] -> [3,4,5,2,32] - dequant_ne = weight_ne; - dequant_nb[0] = ggml_type_size(dst->type); - for (int i = 1; i < GGML_MAX_DIMS + 1; i++) { - dequant_nb[i] = dequant_nb[i - 1] * dequant_ne[i - 1]; - } - scale_offset = ggml_nelements(src0) * sizeof(int8_t); - ggml_cann_pool_alloc dequant_buffer_allocator( - ctx.pool(), ggml_nelements(src0) * ggml_type_size(dst->type)); - aclTensor* acl_weight_tensor = ggml_cann_create_tensor( - src0->data, ACL_INT8, sizeof(int8_t), weight_ne, weight_nb, - GGML_MAX_DIMS + 1); - aclTensor* acl_scale_tensor = ggml_cann_create_tensor( - src0->data, ACL_FLOAT16, sizeof(uint16_t), scale_ne, scale_nb, - GGML_MAX_DIMS + 1, ACL_FORMAT_ND, scale_offset); - aclTensor* dequant_tensor = ggml_cann_create_tensor( - dequant_buffer_allocator.get(), ggml_cann_type_mapping(dst->type), ggml_type_size(dst->type), - dequant_ne, dequant_nb, GGML_MAX_DIMS + 1); - aclnn_mul(ctx, acl_weight_tensor, acl_scale_tensor, dequant_tensor); - dequant_nb[0] = ggml_type_size(dst->type); - dequant_ne = src0->ne; - for (int i = 1; i < GGML_MAX_DIMS; i++) { - dequant_nb[i] = dequant_nb[i - 1] * src0->ne[i - 1]; - } - aclnn_index_select_4d(ctx, dequant_buffer_allocator.get(), - dequant_ne, dequant_nb, - dst->data, dst->ne, dst->nb, - src1, dst->type); + case GGML_TYPE_Q8_0: + { + // add 1 dim for bcast mul. 
+ size_t weight_nb[GGML_MAX_DIMS + 1], scale_nb[GGML_MAX_DIMS + 1], dequant_nb[GGML_MAX_DIMS + 1]; + int64_t weight_ne[GGML_MAX_DIMS + 1], scale_ne[GGML_MAX_DIMS + 1], *dequant_ne; + int64_t scale_offset = 0; + // [3,4,5,64] -> [3,4,5,2,32] + weight_ne[0] = QK8_0; + weight_ne[1] = src0->ne[0] / QK8_0; + weight_nb[0] = sizeof(int8_t); + weight_nb[1] = weight_nb[0] * weight_ne[0]; + for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { + weight_ne[i] = src0->ne[i - 1]; + weight_nb[i] = weight_nb[i - 1] * weight_ne[i - 1]; + } + // [3,4,5,64] -> [3,4,5,2,1] + scale_ne[0] = 1; + scale_ne[1] = src0->ne[0] / QK8_0; + scale_nb[0] = sizeof(uint16_t); + scale_nb[1] = scale_nb[0] * scale_ne[0]; + for (int i = 2; i < GGML_MAX_DIMS + 1; i++) { + scale_ne[i] = src0->ne[i - 1]; + scale_nb[i] = scale_nb[i - 1] * scale_ne[i - 1]; + } + // [3,4,5,64] -> [3,4,5,2,32] + dequant_ne = weight_ne; + dequant_nb[0] = ggml_type_size(dst->type); + for (int i = 1; i < GGML_MAX_DIMS + 1; i++) { + dequant_nb[i] = dequant_nb[i - 1] * dequant_ne[i - 1]; + } + scale_offset = ggml_nelements(src0) * sizeof(int8_t); + ggml_cann_pool_alloc dequant_buffer_allocator(ctx.pool(), + ggml_nelements(src0) * ggml_type_size(dst->type)); + aclTensor * acl_weight_tensor = ggml_cann_create_tensor(src0->data, ACL_INT8, sizeof(int8_t), weight_ne, + weight_nb, GGML_MAX_DIMS + 1); + aclTensor * acl_scale_tensor = + ggml_cann_create_tensor(src0->data, ACL_FLOAT16, sizeof(uint16_t), scale_ne, scale_nb, + GGML_MAX_DIMS + 1, ACL_FORMAT_ND, scale_offset); + aclTensor * dequant_tensor = + ggml_cann_create_tensor(dequant_buffer_allocator.get(), ggml_cann_type_mapping(dst->type), + ggml_type_size(dst->type), dequant_ne, dequant_nb, GGML_MAX_DIMS + 1); + aclnn_mul(ctx, acl_weight_tensor, acl_scale_tensor, dequant_tensor); + dequant_nb[0] = ggml_type_size(dst->type); + dequant_ne = src0->ne; + for (int i = 1; i < GGML_MAX_DIMS; i++) { + dequant_nb[i] = dequant_nb[i - 1] * src0->ne[i - 1]; + } + aclnn_index_select_4d(ctx, dequant_buffer_allocator.get(), dequant_ne, dequant_nb, dst->data, dst->ne, + dst->nb, src1, dst->type); - ggml_cann_release_resources(ctx, acl_weight_tensor, acl_scale_tensor, dequant_tensor); - break; - } + ggml_cann_release_resources(ctx, acl_weight_tensor, acl_scale_tensor, dequant_tensor); + break; + } default: GGML_ABORT("Unsupported tensor type for GGML_OP_GET_ROWS"); break; } } -void ggml_cann_set_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; // src - ggml_tensor* src1 = dst->src[1]; // index +void ggml_cann_set_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; // src + ggml_tensor * src1 = dst->src[1]; // index switch (dst->type) { - case GGML_TYPE_F32: { - aclnn_index_copy_4d(ctx, src0->data, src0->ne, src0->nb, - dst->data, dst->ne, dst->nb, - src1, dst->type); - break; - } - case GGML_TYPE_F16: { - aclTensor* acl_src0 = ggml_cann_create_tensor(src0); - ggml_cann_pool_alloc src_buffer_allocator( - ctx.pool(), ggml_nelements(src0) * sizeof(uint16_t)); - void* src_trans_buffer = src_buffer_allocator.get(); - size_t src_trans_nb[GGML_MAX_DIMS]; - src_trans_nb[0] = sizeof(uint16_t); - for (int i = 1; i < GGML_MAX_DIMS; i++) { - src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; + case GGML_TYPE_F32: + { + aclnn_index_copy_4d(ctx, src0->data, src0->ne, src0->nb, dst->data, dst->ne, dst->nb, src1, dst->type); + break; + } + case GGML_TYPE_F16: + { + aclTensor * acl_src0 = ggml_cann_create_tensor(src0); + ggml_cann_pool_alloc 
src_buffer_allocator(ctx.pool(), ggml_nelements(src0) * sizeof(uint16_t)); + void * src_trans_buffer = src_buffer_allocator.get(); + size_t src_trans_nb[GGML_MAX_DIMS]; + src_trans_nb[0] = sizeof(uint16_t); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; + } + aclTensor * src_trans_tensor = ggml_cann_create_tensor( + src_trans_buffer, ACL_FLOAT16, ggml_type_size(dst->type), src0->ne, src_trans_nb, GGML_MAX_DIMS); + aclnn_cast(ctx, acl_src0, src_trans_tensor, ggml_cann_type_mapping(dst->type)); + aclnn_index_copy_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, dst->data, dst->ne, dst->nb, src1, + dst->type); + ggml_cann_release_resources(ctx, acl_src0, src_trans_tensor); + break; } - aclTensor* src_trans_tensor = ggml_cann_create_tensor( - src_trans_buffer, ACL_FLOAT16, ggml_type_size(dst->type), - src0->ne, src_trans_nb, GGML_MAX_DIMS); - aclnn_cast(ctx, acl_src0, src_trans_tensor, ggml_cann_type_mapping(dst->type)); - aclnn_index_copy_4d(ctx, src_trans_buffer, src0->ne, src_trans_nb, - dst->data, dst->ne, dst->nb, - src1, dst->type); - ggml_cann_release_resources(ctx, acl_src0, src_trans_tensor); - break; - } default: GGML_ABORT("Unsupported tensor type for GGML_OP_SET_ROWS"); break; @@ -1910,12 +1853,13 @@ void ggml_cann_set_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param repeats The number of times each element will be repeated. * @param output_size The size of the output tensor. */ -static void aclnn_repeat_interleave(ggml_backend_cann_context& ctx, - aclTensor* acl_src, aclTensor* acl_dst, - int64_t dim, int64_t repeats, - int64_t output_size) { - GGML_CANN_CALL_ACLNN_OP(ctx, RepeatInterleaveIntWithDim, acl_src, repeats, dim, - output_size, acl_dst); +static void aclnn_repeat_interleave(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + int64_t dim, + int64_t repeats, + int64_t output_size) { + GGML_CANN_CALL_ACLNN_OP(ctx, RepeatInterleaveIntWithDim, acl_src, repeats, dim, output_size, acl_dst); } /** @@ -1930,10 +1874,9 @@ static void aclnn_repeat_interleave(ggml_backend_cann_context& ctx, * @param dst The destination tensor where the result of the matrix * multiplication will be stored. */ -static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx, - ggml_tensor* dst) { - ggml_tensor* weight = dst->src[0]; // weight - ggml_tensor* input = dst->src[1]; // input +static void ggml_cann_mat_mul_fp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * weight = dst->src[0]; // weight + ggml_tensor * input = dst->src[1]; // input // when weight ne2 or ne3 is 1, aclnnMatmulGetWorkspaceSize will auto // broadcast, when weight ne2 or ne3 is not 1, weight need repeat. 
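A scalar sketch of the repeat-interleave semantics documented above (illustrative only, not part of the patch: the backend dispatches the device op RepeatInterleaveIntWithDim, and the non-NEOX ROPE cache further below relies on exactly this expansion for its sin/cos tables):

#include <cstdint>
#include <vector>

// repeat_interleave along a 1-D view: every element is duplicated `repeats`
// times in place, so output_size == src.size() * repeats.
// e.g. {a, b, c} with repeats = 2 -> {a, a, b, b, c, c}
static std::vector<float> repeat_interleave_1d(const std::vector<float> & src, int64_t repeats) {
    std::vector<float> out;
    out.reserve(src.size() * repeats);
    for (float v : src) {
        for (int64_t r = 0; r < repeats; ++r) {
            out.push_back(v);
        }
    }
    return out;
}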
@@ -1948,27 +1891,21 @@ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx, } } - aclTensor* acl_input_tensor = - ggml_cann_create_tensor(input, bcast_input_ne, bcast_input_nb, n_dims); - int64_t transpose_ne[] = {bcast_weight_ne[1], bcast_weight_ne[0], - bcast_weight_ne[2], bcast_weight_ne[3], - bcast_weight_ne[4], bcast_weight_ne[5]}; - size_t transpose_nb[] = {bcast_weight_nb[1], bcast_weight_nb[0], - bcast_weight_nb[2], bcast_weight_nb[3], - bcast_weight_nb[4], bcast_weight_nb[5]}; - aclTensor* acl_weight_tensor; + aclTensor * acl_input_tensor = ggml_cann_create_tensor(input, bcast_input_ne, bcast_input_nb, n_dims); + int64_t transpose_ne[] = { bcast_weight_ne[1], bcast_weight_ne[0], bcast_weight_ne[2], + bcast_weight_ne[3], bcast_weight_ne[4], bcast_weight_ne[5] }; + size_t transpose_nb[] = { bcast_weight_nb[1], bcast_weight_nb[0], bcast_weight_nb[2], + bcast_weight_nb[3], bcast_weight_nb[4], bcast_weight_nb[5] }; + aclTensor * acl_weight_tensor; // Only check env once. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); if (weight_to_nz && is_matmul_weight(weight)) { - acl_weight_tensor = - ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_FRACTAL_NZ); + acl_weight_tensor = ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_FRACTAL_NZ); } else { - acl_weight_tensor = - ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_ND); + acl_weight_tensor = ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims, ACL_FORMAT_ND); } - aclTensor* acl_dst = - ggml_cann_create_tensor(dst, bcast_dst_ne, bcast_dst_nb, n_dims); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, bcast_dst_ne, bcast_dst_nb, n_dims); switch (n_dims) { case 2: @@ -2000,11 +1937,9 @@ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx, * @param dst The destination tensor where the result of the matrix * multiplication will be stored. */ -static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, - ggml_tensor* dst, - const enum ggml_type type) { - ggml_tensor* src0 = dst->src[0]; // weight - ggml_tensor* src1 = dst->src[1]; // input +static void ggml_cann_mul_mat_quant(ggml_backend_cann_context & ctx, ggml_tensor * dst, const enum ggml_type type) { + ggml_tensor * src0 = dst->src[0]; // weight + ggml_tensor * src1 = dst->src[1]; // input // The shape of the weight is NCHW. // Matrix multiplication uses HW dims. @@ -2018,56 +1953,52 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, } else { GGML_ABORT("Only support Q4_0 and Q8_0 MUL_MAT"); } - float weight_nb[] = {src0->ne[0] * weight_elem_size, weight_elem_size}; + float weight_nb[] = { src0->ne[0] * weight_elem_size, weight_elem_size }; size_t weight_stride = src0->ne[1] * src0->ne[0] * weight_elem_size; - size_t weight_size = weight_stride * src0->ne[2] * src0->ne[3]; + size_t weight_size = weight_stride * src0->ne[2] * src0->ne[3]; // scale stored at the end of weight. Also need transpose. 
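    // Illustrative layout of the repacked quantized weight consumed here, as
    // implied by weight_size and scale_offset below (a sketch, not a format spec):
    //   [ quantized weights : ne0 * ne1 * ne2 * ne3 * weight_elem_size bytes ]
    //   [ fp16 scales       : (ne0 / QK8_0) * ne1 * ne2 * ne3 * 2 bytes      ]
    // Each group of QK8_0 values shares one fp16 scale, i.e. an element
    // dequantizes as w = q * scale, presumably what WeightQuantBatchMatmulV2
    // uses the antiquant scale tensor for.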
size_t scale_elem_size = sizeof(uint16_t); - size_t scale_nb[] = {src0->ne[0] / QK8_0 * scale_elem_size, - scale_elem_size}; - size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; - char* scale_offset = (char*)src0->data + weight_size; + size_t scale_nb[] = { src0->ne[0] / QK8_0 * scale_elem_size, scale_elem_size }; + size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; + char * scale_offset = (char *) src0->data + weight_size; // input - size_t input_elem_size = sizeof(uint16_t); - int64_t input_ne[] = {src1->ne[0], src1->ne[1]}; - size_t input_nb[] = {input_elem_size, input_ne[0] * input_elem_size}; - size_t input_stride = input_ne[0] * input_ne[1] * input_elem_size; + size_t input_elem_size = sizeof(uint16_t); + int64_t input_ne[] = { src1->ne[0], src1->ne[1] }; + size_t input_nb[] = { input_elem_size, input_ne[0] * input_elem_size }; + size_t input_stride = input_ne[0] * input_ne[1] * input_elem_size; ggml_cann_pool_alloc input_alloctor(ctx.pool()); - void* input_buffer = src1->data; + void * input_buffer = src1->data; // case in if (src1->type != GGML_TYPE_F16) { - aclTensor* acl_src1_tensor = ggml_cann_create_tensor(src1); - input_buffer = - input_alloctor.alloc(ggml_nelements(src1) * input_elem_size); + aclTensor * acl_src1_tensor = ggml_cann_create_tensor(src1); + input_buffer = input_alloctor.alloc(ggml_nelements(src1) * input_elem_size); - int64_t* input_cast_ne = src1->ne; - size_t input_cast_nb[GGML_MAX_DIMS]; + int64_t * input_cast_ne = src1->ne; + size_t input_cast_nb[GGML_MAX_DIMS]; input_cast_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_cast_nb[i] = input_cast_nb[i - 1] * input_cast_ne[i - 1]; } - aclTensor* acl_input_tensor = ggml_cann_create_tensor( - input_buffer, ACL_FLOAT16, input_elem_size, input_cast_ne, - input_cast_nb, GGML_MAX_DIMS); + aclTensor * acl_input_tensor = ggml_cann_create_tensor(input_buffer, ACL_FLOAT16, input_elem_size, + input_cast_ne, input_cast_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src1_tensor, acl_input_tensor, ACL_FLOAT16); ggml_cann_release_resources(ctx, acl_input_tensor, acl_src1_tensor); } // output - size_t output_elem_size = sizeof(uint16_t); - size_t output_nb[] = {output_elem_size, dst->ne[0] * output_elem_size}; + size_t output_elem_size = sizeof(uint16_t); + size_t output_nb[] = { output_elem_size, dst->ne[0] * output_elem_size }; ggml_cann_pool_alloc output_allocator(ctx.pool()); - void* output_buffer = - output_allocator.alloc(ggml_nelements(dst) * output_elem_size); - size_t output_stride = dst->ne[0] * dst->ne[1] * output_elem_size; + void * output_buffer = output_allocator.alloc(ggml_nelements(dst) * output_elem_size); + size_t output_stride = dst->ne[0] * dst->ne[1] * output_elem_size; // aclnn - int64_t max_elem_size = 65535; - int64_t split_size = (src0->ne[1] / max_elem_size) + 1; + int64_t max_elem_size = 65535; + int64_t split_size = (src0->ne[1] / max_elem_size) + 1; ggml_cann_pool_alloc workspace_allocator(ctx.pool()); for (int64_t n1 = 0; n1 < src1->ne[3]; n1++) { for (int64_t c1 = 0; c1 < src1->ne[2]; c1++) { @@ -2077,71 +2008,57 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, int64_t batch1 = (n1 * src1->ne[2]) + c1; int64_t batch0 = (n0 * src0->ne[2]) + c0; - aclTensor* acl_input_tensor = ggml_cann_create_tensor( - (char*)input_buffer + batch1 * input_stride, ACL_FLOAT16, - input_elem_size, input_ne, input_nb, 2); + aclTensor * acl_input_tensor = ggml_cann_create_tensor((char *) input_buffer + batch1 * input_stride, + 
ACL_FLOAT16, input_elem_size, input_ne, input_nb, 2); // first split int64_t weight_ne_offset = 0; - int64_t weight_ne[2] = { - max_elem_size > src0->ne[1] ? src0->ne[1] : max_elem_size, - src0->ne[0]}; - int64_t scale_ne_offset = 0; - int64_t scale_ne[2] = {weight_ne[0], weight_ne[1] / QK8_0}; + int64_t weight_ne[2] = { max_elem_size > src0->ne[1] ? src0->ne[1] : max_elem_size, src0->ne[0] }; + int64_t scale_ne_offset = 0; + int64_t scale_ne[2] = { weight_ne[0], weight_ne[1] / QK8_0 }; int64_t output_ne_offset = 0; - int64_t output_ne[2] = {weight_ne[0], dst->ne[1]}; - - aclTensor* acl_weight_tensor = ggml_cann_create_tensor( - (char*)src0->data + batch0 * weight_stride, - ggml_cann_type_mapping(type), weight_elem_size, weight_ne, - weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); - aclTensor* acl_scale_tensor = ggml_cann_create_tensor( - scale_offset + batch0 * scale_stride, ACL_FLOAT16, - scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, - scale_ne_offset); - aclTensor* acl_output_tensor = ggml_cann_create_tensor( - (char*)output_buffer + batch1 * output_stride, ACL_FLOAT16, - output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, - output_ne_offset); + int64_t output_ne[2] = { weight_ne[0], dst->ne[1] }; + + aclTensor * acl_weight_tensor = + ggml_cann_create_tensor((char *) src0->data + batch0 * weight_stride, ggml_cann_type_mapping(type), + weight_elem_size, weight_ne, weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); + aclTensor * acl_scale_tensor = + ggml_cann_create_tensor(scale_offset + batch0 * scale_stride, ACL_FLOAT16, scale_elem_size, scale_ne, + scale_nb, 2, ACL_FORMAT_ND, scale_ne_offset); + aclTensor * acl_output_tensor = + ggml_cann_create_tensor((char *) output_buffer + batch1 * output_stride, ACL_FLOAT16, output_elem_size, + output_ne, output_nb, 2, ACL_FORMAT_ND, output_ne_offset); int64_t antiquantGroupSize = 0; if (src0->ne[0] > QK8_0) { antiquantGroupSize = QK8_0; } - GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor, - acl_weight_tensor, acl_scale_tensor, nullptr, - nullptr, nullptr, nullptr, antiquantGroupSize, - acl_output_tensor); + GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor, acl_weight_tensor, + acl_scale_tensor, nullptr, nullptr, nullptr, nullptr, antiquantGroupSize, + acl_output_tensor); ggml_cann_release_resources(ctx, acl_weight_tensor, acl_scale_tensor, acl_output_tensor); // other splits for (int64_t split = 1; split < split_size; split++) { - weight_ne_offset += - weight_elem_size * weight_ne[0] * weight_ne[1]; - weight_ne[0] = max_elem_size * (split + 1) > src0->ne[1] - ? src0->ne[1] - (max_elem_size * split) - : max_elem_size; + weight_ne_offset += weight_elem_size * weight_ne[0] * weight_ne[1]; + weight_ne[0] = + max_elem_size * (split + 1) > src0->ne[1] ? 
src0->ne[1] - (max_elem_size * split) : max_elem_size; scale_ne_offset += scale_elem_size * scale_ne[0] * scale_ne[1]; scale_ne[0] = weight_ne[0]; - output_ne_offset += - output_elem_size * output_ne[0] * output_ne[1]; + output_ne_offset += output_elem_size * output_ne[0] * output_ne[1]; output_ne[0] = weight_ne[0]; - acl_weight_tensor = ggml_cann_create_tensor( - (char*)src0->data + batch0 * weight_stride, - ggml_cann_type_mapping(type), weight_elem_size, weight_ne, - weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); - acl_scale_tensor = ggml_cann_create_tensor( - scale_offset + batch0 * scale_stride, ACL_FLOAT16, - scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, - scale_ne_offset); - acl_output_tensor = ggml_cann_create_tensor( - (char*)output_buffer + batch1 * output_stride, ACL_FLOAT16, - output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, - output_ne_offset); - GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor, - acl_weight_tensor, acl_scale_tensor, nullptr, - nullptr, nullptr, nullptr, antiquantGroupSize, - acl_output_tensor); + acl_weight_tensor = + ggml_cann_create_tensor((char *) src0->data + batch0 * weight_stride, ggml_cann_type_mapping(type), + weight_elem_size, weight_ne, weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); + acl_scale_tensor = + ggml_cann_create_tensor(scale_offset + batch0 * scale_stride, ACL_FLOAT16, scale_elem_size, + scale_ne, scale_nb, 2, ACL_FORMAT_ND, scale_ne_offset); + acl_output_tensor = + ggml_cann_create_tensor((char *) output_buffer + batch1 * output_stride, ACL_FLOAT16, + output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, output_ne_offset); + GGML_CANN_CALL_ACLNN_OP(ctx, WeightQuantBatchMatmulV2, acl_input_tensor, acl_weight_tensor, + acl_scale_tensor, nullptr, nullptr, nullptr, nullptr, antiquantGroupSize, + acl_output_tensor); ggml_cann_release_resources(ctx, acl_weight_tensor, acl_scale_tensor, acl_output_tensor); } @@ -2151,24 +2068,23 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, // cast out if (dst->type != GGML_TYPE_F16) { - int64_t* output_cast_ne = dst->ne; - size_t output_cast_nb[GGML_MAX_DIMS]; + int64_t * output_cast_ne = dst->ne; + size_t output_cast_nb[GGML_MAX_DIMS]; output_cast_nb[0] = sizeof(uint16_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { output_cast_nb[i] = output_cast_nb[i - 1] * output_cast_ne[i - 1]; } - aclTensor* acl_output_tensor = ggml_cann_create_tensor( - output_buffer, ACL_FLOAT16, output_elem_size, output_cast_ne, - output_cast_nb, GGML_MAX_DIMS); - aclTensor* acl_dst_tensor = ggml_cann_create_tensor(dst); + aclTensor * acl_output_tensor = ggml_cann_create_tensor(output_buffer, ACL_FLOAT16, output_elem_size, + output_cast_ne, output_cast_nb, GGML_MAX_DIMS); + aclTensor * acl_dst_tensor = ggml_cann_create_tensor(dst); aclnn_cast(ctx, acl_output_tensor, acl_dst_tensor, ggml_cann_type_mapping(dst->type)); ggml_cann_release_resources(ctx, acl_output_tensor, acl_dst_tensor); } } -void ggml_cann_mul_mat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { +void ggml_cann_mul_mat(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const enum ggml_type type = dst->src[0]->type; switch (type) { case GGML_TYPE_F32: @@ -2201,10 +2117,13 @@ void ggml_cann_mul_mat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param dims An array specifying the dimensions along which elements are * shifted. 
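 *
 * Example (mirroring the non-NEOX ROPE call site below): the input is viewed
 * with a size-2 innermost dimension holding adjacent (q0, q1) pairs and rolled
 * with shifts = {1}, dims = {3}, which turns [q0, q1, q2, q3, ...] into
 * [q1, q0, q3, q2, ...].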
*/ -static void aclnn_roll(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst, int64_t* shifts, int64_t* dims) { - aclIntArray* acl_shifts = aclCreateIntArray(shifts, 1); - aclIntArray* acl_dims = aclCreateIntArray(dims, 1); +static void aclnn_roll(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_dst, + int64_t * shifts, + int64_t * dims) { + aclIntArray * acl_shifts = aclCreateIntArray(shifts, 1); + aclIntArray * acl_dims = aclCreateIntArray(dims, 1); GGML_CANN_CALL_ACLNN_OP(ctx, Roll, acl_src, acl_shifts, acl_dims, acl_dst); ggml_cann_release_resources(ctx, acl_shifts, acl_dims); } @@ -2222,12 +2141,14 @@ static void aclnn_roll(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param index_num The number of positions specified in the index array. * @param value The scalar value used to fill the specified positions. */ -static void aclnn_index_fill_tensor(ggml_backend_cann_context& ctx, - aclTensor* acl_src, int64_t dim, - int64_t* index, int64_t index_num, - float value) { - aclIntArray* acl_index = aclCreateIntArray(index, index_num); - aclScalar* acl_value = aclCreateScalar(&value, aclDataType::ACL_FLOAT); +static void aclnn_index_fill_tensor(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + int64_t dim, + int64_t * index, + int64_t index_num, + float value) { + aclIntArray * acl_index = aclCreateIntArray(index, index_num); + aclScalar * acl_value = aclCreateScalar(&value, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceIndexFillTensor, acl_src, dim, acl_index, acl_value); ggml_cann_release_resources(ctx, acl_index, acl_value); } @@ -2262,85 +2183,82 @@ static void aclnn_index_fill_tensor(ggml_backend_cann_context& ctx, * @param is_neox Whether to use Neox-style repeat strategy * (dim expansion vs repeat_interleave). */ -static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, - float* corr_dims, float ext_factor, - float theta_scale, float freq_scale, - float attn_factor, bool is_neox) { - ggml_tensor* src0 = dst->src[0]; // input - ggml_tensor* src1 = dst->src[1]; // position - ggml_tensor* src2 = dst->src[2]; // freq_factors - - if(src2 == nullptr && ctx.rope_cache.cached - && ctx.rope_cache.ext_factor == ext_factor - && ctx.rope_cache.theta_scale == theta_scale - && ctx.rope_cache.freq_scale == freq_scale - && ctx.rope_cache.attn_factor == attn_factor - && ctx.rope_cache.is_neox == is_neox) { +static void aclnn_cache_init(ggml_backend_cann_context & ctx, + ggml_tensor * dst, + float * corr_dims, + float ext_factor, + float theta_scale, + float freq_scale, + float attn_factor, + bool is_neox) { + ggml_tensor * src0 = dst->src[0]; // input + ggml_tensor * src1 = dst->src[1]; // position + ggml_tensor * src2 = dst->src[2]; // freq_factors + + if (src2 == nullptr && ctx.rope_cache.cached && ctx.rope_cache.ext_factor == ext_factor && + ctx.rope_cache.theta_scale == theta_scale && ctx.rope_cache.freq_scale == freq_scale && + ctx.rope_cache.attn_factor == attn_factor && ctx.rope_cache.is_neox == is_neox) { // use cache. 
return; } int64_t theta_scale_length = src0->ne[0] / 2; - int64_t theta_scale_ne[] = {theta_scale_length, 1, 1, 1}; - size_t theta_scale_nb[] = {sizeof(float), sizeof(float), sizeof(float), - theta_scale_length * sizeof(float)}; + int64_t theta_scale_ne[] = { theta_scale_length, 1, 1, 1 }; + size_t theta_scale_nb[] = { sizeof(float), sizeof(float), sizeof(float), theta_scale_length * sizeof(float) }; GGML_ASSERT(src1->type == GGML_TYPE_I32); int64_t position_length = src1->ne[0]; - int64_t position_ne[] = {1, 1, position_length, 1}; - size_t position_nb[] = {sizeof(int32_t), sizeof(int32_t), sizeof(int32_t), - sizeof(int32_t) * position_length}; + int64_t position_ne[] = { 1, 1, position_length, 1 }; + size_t position_nb[] = { sizeof(int32_t), sizeof(int32_t), sizeof(int32_t), sizeof(int32_t) * position_length }; - int64_t theta_ne[] = {theta_scale_length, 1, position_length, 1}; - size_t theta_nb[GGML_MAX_DIMS]; + int64_t theta_ne[] = { theta_scale_length, 1, position_length, 1 }; + size_t theta_nb[GGML_MAX_DIMS]; theta_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { theta_nb[i] = theta_nb[i - 1] * theta_ne[i - 1]; } // theta_scale arange, [0,1,...,ne00/2 - 1] - aclTensor* acl_theta_scale_tensor = nullptr; + aclTensor * acl_theta_scale_tensor = nullptr; // cache theta scale if (ctx.rope_cache.theta_scale_length != theta_scale_length || // theta_scale and freq_scale should not change during the current token inference process, // so we can directly use == here instead of comparing the absolute difference. - ctx.rope_cache.theta_scale != theta_scale || - ctx.rope_cache.freq_scale != freq_scale) { - + ctx.rope_cache.theta_scale != theta_scale || ctx.rope_cache.freq_scale != freq_scale) { ctx.rope_cache.theta_scale_length = theta_scale_length; if (ctx.rope_cache.theta_scale_cache != nullptr) { ACL_CHECK(aclrtFree(ctx.rope_cache.theta_scale_cache)); } - ACL_CHECK(aclrtMalloc(&ctx.rope_cache.theta_scale_cache, theta_scale_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); + ACL_CHECK(aclrtMalloc(&ctx.rope_cache.theta_scale_cache, theta_scale_length * sizeof(float), + ACL_MEM_MALLOC_HUGE_FIRST)); - acl_theta_scale_tensor = - ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), - theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); + acl_theta_scale_tensor = ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), + theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); - float start = 0; - float step = 1; - float stop = theta_scale_length; + float start = 0; + float step = 1; + float stop = theta_scale_length; float n_elements = theta_scale_length; aclnn_arange(ctx, acl_theta_scale_tensor, start, stop, step, n_elements); ggml_cann_pool_alloc yarn_ramp_allocator(ctx.pool()); - aclTensor* acl_yarn_ramp_tensor = nullptr; + aclTensor * acl_yarn_ramp_tensor = nullptr; if (ext_factor != 0) { // -rope_yarn_ramp // const float y = (i0 / 2 - low) / MAX(0.001f, high - low); // return MIN(1, MAX(0, y)) - 1; yarn_ramp_allocator.alloc(theta_scale_length * sizeof(float)); - void* yarn_ramp_buffer = yarn_ramp_allocator.get(); - acl_yarn_ramp_tensor = ggml_cann_create_tensor(yarn_ramp_buffer, ACL_FLOAT, sizeof(float), - theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); - float zero_value = 0, one_value = 1; - float denom_safe_value = MAX(0.001f, corr_dims[1] - corr_dims[0]); - aclScalar* low = aclCreateScalar(&corr_dims[0], aclDataType::ACL_FLOAT); - aclScalar* zero = aclCreateScalar(&zero_value, aclDataType::ACL_FLOAT); - aclScalar* one = 
aclCreateScalar(&one_value, aclDataType::ACL_FLOAT); - aclScalar* denom_safe = aclCreateScalar(&denom_safe_value, aclDataType::ACL_FLOAT); - aclScalar* ext_factor_sc = aclCreateScalar(&ext_factor, aclDataType::ACL_FLOAT); + void * yarn_ramp_buffer = yarn_ramp_allocator.get(); + acl_yarn_ramp_tensor = ggml_cann_create_tensor(yarn_ramp_buffer, ACL_FLOAT, sizeof(float), theta_scale_ne, + theta_scale_nb, GGML_MAX_DIMS); + float zero_value = 0, one_value = 1; + float denom_safe_value = MAX(0.001f, corr_dims[1] - corr_dims[0]); + aclScalar * low = aclCreateScalar(&corr_dims[0], aclDataType::ACL_FLOAT); + aclScalar * zero = aclCreateScalar(&zero_value, aclDataType::ACL_FLOAT); + aclScalar * one = aclCreateScalar(&one_value, aclDataType::ACL_FLOAT); + aclScalar * denom_safe = aclCreateScalar(&denom_safe_value, aclDataType::ACL_FLOAT); + aclScalar * ext_factor_sc = aclCreateScalar(&ext_factor, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, Subs, acl_theta_scale_tensor, low, one, acl_yarn_ramp_tensor); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceDivs, acl_yarn_ramp_tensor, denom_safe); @@ -2357,9 +2275,9 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, // // we cache (freq_scale - freq_scale * ramp_mix + ramp_mix), Considering that the rope_yarn_ramp here is the inverse // cache freq_scale + (freq_scale - 1) * ramp_mix - float freq_scale_1 = freq_scale - 1; - aclScalar* freq_scale_sc = aclCreateScalar(&freq_scale, aclDataType::ACL_FLOAT); - aclScalar* freq_scale_1_sc = aclCreateScalar(&freq_scale_1, aclDataType::ACL_FLOAT); + float freq_scale_1 = freq_scale - 1; + aclScalar * freq_scale_sc = aclCreateScalar(&freq_scale, aclDataType::ACL_FLOAT); + aclScalar * freq_scale_1_sc = aclCreateScalar(&freq_scale_1, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceMuls, acl_yarn_ramp_tensor, freq_scale_1_sc); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceAdds, acl_yarn_ramp_tensor, freq_scale_sc, one); @@ -2367,9 +2285,8 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, } // power - aclScalar* acl_theta_scale = aclCreateScalar(&theta_scale, aclDataType::ACL_FLOAT); - GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, acl_theta_scale, acl_theta_scale_tensor, - acl_theta_scale_tensor); + aclScalar * acl_theta_scale = aclCreateScalar(&theta_scale, aclDataType::ACL_FLOAT); + GGML_CANN_CALL_ACLNN_OP(ctx, PowScalarTensor, acl_theta_scale, acl_theta_scale_tensor, acl_theta_scale_tensor); if (ext_factor != 0) { aclnn_mul(ctx, acl_theta_scale_tensor, acl_yarn_ramp_tensor); @@ -2380,22 +2297,20 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, ggml_cann_release_resources(ctx, acl_yarn_ramp_tensor, acl_theta_scale); } else { // use cache - acl_theta_scale_tensor = - ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), - theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); + acl_theta_scale_tensor = ggml_cann_create_tensor(ctx.rope_cache.theta_scale_cache, ACL_FLOAT, sizeof(float), + theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); } ggml_cann_pool_alloc freq_fac_res_allocator(ctx.pool()); // freq_factors if (src2) { freq_fac_res_allocator.alloc(theta_scale_length * sizeof(float)); - void* freq_fac_res_ptr = freq_fac_res_allocator.get(); - aclTensor* acl_freq_factors_tensor = ggml_cann_create_tensor( - src2->data, ggml_cann_type_mapping(src2->type), - ggml_type_size(src2->type), theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); - aclTensor* acl_freq_fac_res_tensor = ggml_cann_create_tensor( - 
freq_fac_res_ptr, ACL_FLOAT, sizeof(float), - theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); + void * freq_fac_res_ptr = freq_fac_res_allocator.get(); + aclTensor * acl_freq_factors_tensor = + ggml_cann_create_tensor(src2->data, ggml_cann_type_mapping(src2->type), ggml_type_size(src2->type), + theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); + aclTensor * acl_freq_fac_res_tensor = ggml_cann_create_tensor(freq_fac_res_ptr, ACL_FLOAT, sizeof(float), + theta_scale_ne, theta_scale_nb, GGML_MAX_DIMS); aclnn_div(ctx, acl_theta_scale_tensor, acl_freq_factors_tensor, acl_freq_fac_res_tensor); std::swap(acl_theta_scale_tensor, acl_freq_fac_res_tensor); ggml_cann_release_resources(ctx, acl_freq_factors_tensor, acl_freq_fac_res_tensor); @@ -2411,42 +2326,37 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, ACL_CHECK(aclrtFree(ctx.rope_cache.cos_cache)); } int64_t repeat_theta_length = theta_scale_length * position_length * 2; - ACL_CHECK(aclrtMalloc(&ctx.rope_cache.sin_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); - ACL_CHECK(aclrtMalloc(&ctx.rope_cache.cos_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); + ACL_CHECK( + aclrtMalloc(&ctx.rope_cache.sin_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); + ACL_CHECK( + aclrtMalloc(&ctx.rope_cache.cos_cache, repeat_theta_length * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST)); } // position - aclTensor* acl_position_tensor = ggml_cann_create_tensor( - src1->data, ggml_cann_type_mapping(src1->type), - ggml_type_size(src1->type), position_ne, position_nb, GGML_MAX_DIMS); + aclTensor * acl_position_tensor = + ggml_cann_create_tensor(src1->data, ggml_cann_type_mapping(src1->type), ggml_type_size(src1->type), position_ne, + position_nb, GGML_MAX_DIMS); // power * position - int64_t theta_length = theta_scale_length * position_length; - ggml_cann_pool_alloc theta_allocator(ctx.pool(), - theta_length * sizeof(float)); - void* theta_buffer = theta_allocator.get(); + int64_t theta_length = theta_scale_length * position_length; + ggml_cann_pool_alloc theta_allocator(ctx.pool(), theta_length * sizeof(float)); + void * theta_buffer = theta_allocator.get(); - aclTensor* acl_theta_tensor = - ggml_cann_create_tensor(theta_buffer, ACL_FLOAT, sizeof(float), - theta_ne, theta_nb, GGML_MAX_DIMS); - aclnn_mul(ctx, acl_position_tensor, acl_theta_scale_tensor, - acl_theta_tensor); + aclTensor * acl_theta_tensor = + ggml_cann_create_tensor(theta_buffer, ACL_FLOAT, sizeof(float), theta_ne, theta_nb, GGML_MAX_DIMS); + aclnn_mul(ctx, acl_position_tensor, acl_theta_scale_tensor, acl_theta_tensor); // sin/cos - ggml_cann_pool_alloc sin_allocator(ctx.pool(), - theta_length * sizeof(float)); - void* sin_buffer = sin_allocator.get(); - aclTensor* acl_sin_tensor = ggml_cann_create_tensor( - sin_buffer, ACL_FLOAT, sizeof(float), theta_ne, theta_nb, - GGML_MAX_DIMS, ACL_FORMAT_ND); + ggml_cann_pool_alloc sin_allocator(ctx.pool(), theta_length * sizeof(float)); + void * sin_buffer = sin_allocator.get(); + aclTensor * acl_sin_tensor = + ggml_cann_create_tensor(sin_buffer, ACL_FLOAT, sizeof(float), theta_ne, theta_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_sin(ctx, acl_theta_tensor, acl_sin_tensor); - ggml_cann_pool_alloc cos_allocator(ctx.pool(), - theta_length * sizeof(float)); - void* cos_buffer = cos_allocator.get(); - aclTensor* acl_cos_tensor = ggml_cann_create_tensor( - cos_buffer, ACL_FLOAT, sizeof(float), theta_ne, theta_nb, - GGML_MAX_DIMS, ACL_FORMAT_ND); + ggml_cann_pool_alloc 
cos_allocator(ctx.pool(), theta_length * sizeof(float)); + void * cos_buffer = cos_allocator.get(); + aclTensor * acl_cos_tensor = + ggml_cann_create_tensor(cos_buffer, ACL_FLOAT, sizeof(float), theta_ne, theta_nb, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_cos(ctx, acl_theta_tensor, acl_cos_tensor); if (ext_factor != 0) { @@ -2459,81 +2369,79 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, aclnn_muls(ctx, acl_cos_tensor, attn_factor, nullptr, true); } - int64_t sin_reshape_ne[4] = {src0->ne[0], 1, src0->ne[2], 1}; - size_t sin_reshape_nb[GGML_MAX_DIMS]; + int64_t sin_reshape_ne[4] = { src0->ne[0], 1, src0->ne[2], 1 }; + size_t sin_reshape_nb[GGML_MAX_DIMS]; sin_reshape_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { sin_reshape_nb[i] = sin_reshape_nb[i - 1] * sin_reshape_ne[i - 1]; } - aclTensor* acl_sin_repeat_tensor = - ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), - sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); - aclTensor* acl_cos_repeat_tensor = - ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), - sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); + aclTensor * acl_sin_repeat_tensor = ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), + sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); + aclTensor * acl_cos_repeat_tensor = ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), + sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); // repeat if (is_neox) { - int64_t repeatsArray[] = {1, 1, 1, 2}; + int64_t repeatsArray[] = { 1, 1, 1, 2 }; aclnn_repeat(ctx, acl_sin_tensor, acl_sin_repeat_tensor, repeatsArray); aclnn_repeat(ctx, acl_cos_tensor, acl_cos_repeat_tensor, repeatsArray); } else { int64_t num_repeats = 2; - int64_t dim = 3; + int64_t dim = 3; int64_t output_size = theta_scale_length * num_repeats; - aclnn_repeat_interleave(ctx, acl_sin_tensor, acl_sin_repeat_tensor, dim, - num_repeats, output_size); - aclnn_repeat_interleave(ctx, acl_cos_tensor, acl_cos_repeat_tensor, dim, - num_repeats, output_size); + aclnn_repeat_interleave(ctx, acl_sin_tensor, acl_sin_repeat_tensor, dim, num_repeats, output_size); + aclnn_repeat_interleave(ctx, acl_cos_tensor, acl_cos_repeat_tensor, dim, num_repeats, output_size); } // Other layers use cache except first layer. 
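    // What the rope sin/cos cache holds, in scalar form (an illustrative summary
    // of the arange/pow/mul/sin/cos sequence above, not additional backend code):
    //   freq[i]     = theta_scale^i                for i = 0 .. ne00/2 - 1,
    //                 scaled by freq_scale (or the YaRN ramp when ext_factor != 0)
    //                 and divided by freq_factors[i] when src2 is present
    //   theta[p][i] = position[p] * freq[i]
    //   sin_cache   = sin(theta) * attn_factor,  cos_cache = cos(theta) * attn_factor
    // expanded to the full head size: NEOX tiles the half vector ([s0..sN, s0..sN]),
    // while the non-NEOX path repeat-interleaves it ([s0, s0, s1, s1, ...]).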
- ctx.rope_cache.cached = true; - ctx.rope_cache.ext_factor = ext_factor; + ctx.rope_cache.cached = true; + ctx.rope_cache.ext_factor = ext_factor; ctx.rope_cache.theta_scale = theta_scale; - ctx.rope_cache.freq_scale = freq_scale; + ctx.rope_cache.freq_scale = freq_scale; ctx.rope_cache.attn_factor = attn_factor; - ctx.rope_cache.is_neox = is_neox; + ctx.rope_cache.is_neox = is_neox; - ggml_cann_release_resources(ctx, acl_theta_scale_tensor, acl_position_tensor, - acl_theta_tensor, acl_sin_tensor, acl_sin_repeat_tensor, acl_cos_tensor, - acl_cos_repeat_tensor); + ggml_cann_release_resources(ctx, acl_theta_scale_tensor, acl_position_tensor, acl_theta_tensor, acl_sin_tensor, + acl_sin_repeat_tensor, acl_cos_tensor, acl_cos_repeat_tensor); } #ifdef __cplusplus extern "C" { #endif -aclnnStatus aclnnRotaryPositionEmbeddingGetWorkspaceSize( - const aclTensor* x, const aclTensor* cos, const aclTensor* sin, - int64_t mode, const aclTensor* yOut, uint64_t* workspaceSize, - aclOpExecutor** executor); -aclnnStatus aclnnRotaryPositionEmbedding(void* workspace, - uint64_t workspaceSize, - aclOpExecutor* executor, - aclrtStream stream); +aclnnStatus aclnnRotaryPositionEmbeddingGetWorkspaceSize(const aclTensor * x, + const aclTensor * cos, + const aclTensor * sin, + int64_t mode, + const aclTensor * yOut, + uint64_t * workspaceSize, + aclOpExecutor ** executor); +aclnnStatus aclnnRotaryPositionEmbedding(void * workspace, + uint64_t workspaceSize, + aclOpExecutor * executor, + aclrtStream stream); #ifdef __cplusplus } #endif -void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; // input +void ggml_cann_rope(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; // input // param - float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; // const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t*)dst->op_params)[1]; - const int mode = ((int32_t*)dst->op_params)[2]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; // const int n_ctx = ((int32_t *) dst->op_params)[3]; - const int n_ctx_orig = ((int32_t*)dst->op_params)[4]; + const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; GGML_TENSOR_UNARY_OP_LOCALS - memcpy(&freq_base, (int32_t*)dst->op_params + 5, sizeof(float)); - memcpy(&freq_scale, (int32_t*)dst->op_params + 6, sizeof(float)); - memcpy(&ext_factor, (int32_t*)dst->op_params + 7, sizeof(float)); - memcpy(&attn_factor, (int32_t*)dst->op_params + 8, sizeof(float)); - memcpy(&beta_fast, (int32_t*)dst->op_params + 9, sizeof(float)); - memcpy(&beta_slow, (int32_t*)dst->op_params + 10, sizeof(float)); + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); // TODO: n_dims <= ne0 GGML_ASSERT(n_dims == ne0); @@ -2542,123 +2450,111 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { const float theta_scale = powf(freq_base, -2.0f / n_dims); float corr_dims[2]; - ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, - beta_slow, corr_dims); + 
ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; // init ctx.rope_cos/rope_sin cache - aclnn_cache_init(ctx, dst, corr_dims, ext_factor, - theta_scale, freq_scale, attn_factor, is_neox); + aclnn_cache_init(ctx, dst, corr_dims, ext_factor, theta_scale, freq_scale, attn_factor, is_neox); - int64_t sin_reshape_ne[4] = {ne00, 1, ne02, 1}; - size_t sin_reshape_nb[GGML_MAX_DIMS]; + int64_t sin_reshape_ne[4] = { ne00, 1, ne02, 1 }; + size_t sin_reshape_nb[GGML_MAX_DIMS]; sin_reshape_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { sin_reshape_nb[i] = sin_reshape_nb[i - 1] * sin_reshape_ne[i - 1]; } - aclTensor* acl_sin_reshape_tensor = - ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), - sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); - aclTensor* acl_cos_reshape_tensor = - ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), - sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); + aclTensor * acl_sin_reshape_tensor = ggml_cann_create_tensor(ctx.rope_cache.sin_cache, ACL_FLOAT, sizeof(float), + sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); + aclTensor * acl_cos_reshape_tensor = ggml_cann_create_tensor(ctx.rope_cache.cos_cache, ACL_FLOAT, sizeof(float), + sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); - aclTensor* acl_src = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); #ifdef ASCEND_310P // Special ROPE operation for 310P // roll input - void* input_roll_buffer; - aclTensor* acl_minus_one_tensor; - void* minus_one_scale_buffer = nullptr; + void * input_roll_buffer; + aclTensor * acl_minus_one_tensor; + void * minus_one_scale_buffer = nullptr; ggml_cann_pool_alloc roll_allocator(ctx.pool(), ggml_nbytes(src0)); - ggml_cann_pool_alloc minus_one_scale_allocator( - ctx.pool(), sizeof(float) * src0->ne[0]); + ggml_cann_pool_alloc minus_one_scale_allocator(ctx.pool(), sizeof(float) * src0->ne[0]); if (!is_neox) { // roll input: [q0,q1,q2,q3,...] -> [q1,q0,q3,q2,...] 
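        // The 310P branch implements ROPE without the fused RotaryPositionEmbedding
        // op used below. Scalar sketch of what the roll and the [-1, 1, ...] scale
        // in this branch compute (illustrative only):
        //   rolled = [q1, q0, q3, q2, ...]             // pairwise swap via aclnn_roll
        //   sign   = [-1, 1, -1, 1, ...]               // aclnn_values + index fill
        //   out    = q * cos + (rolled * sign) * sin   // = (q0*cos - q1*sin, q1*cos + q0*sin, ...)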
- input_roll_buffer = roll_allocator.get(); - int64_t input_roll_ne[4] = {2, src0->ne[1] * (src0->ne[0] / 2), - src0->ne[2], src0->ne[3]}; - size_t input_roll_nb[GGML_MAX_DIMS]; + input_roll_buffer = roll_allocator.get(); + int64_t input_roll_ne[4] = { 2, src0->ne[1] * (src0->ne[0] / 2), src0->ne[2], src0->ne[3] }; + size_t input_roll_nb[GGML_MAX_DIMS]; input_roll_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_roll_nb[i] = input_roll_nb[i - 1] * input_roll_ne[i - 1]; } - aclTensor* acl_input_roll_tensor = ggml_cann_create_tensor( - input_roll_buffer, ggml_cann_type_mapping(src0->type), - ggml_type_size(src0->type), input_roll_ne, input_roll_nb, - GGML_MAX_DIMS); - aclTensor* acl_input_tensor = ggml_cann_create_tensor( - src0->data, ggml_cann_type_mapping(src0->type), - ggml_type_size(src0->type), input_roll_ne, input_roll_nb, - GGML_MAX_DIMS); - - int64_t shifts[] = {1}; - int64_t dims[] = {3}; + aclTensor * acl_input_roll_tensor = + ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), + input_roll_ne, input_roll_nb, GGML_MAX_DIMS); + aclTensor * acl_input_tensor = + ggml_cann_create_tensor(src0->data, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), + input_roll_ne, input_roll_nb, GGML_MAX_DIMS); + + int64_t shifts[] = { 1 }; + int64_t dims[] = { 3 }; aclnn_roll(ctx, acl_input_tensor, acl_input_roll_tensor, shifts, dims); ggml_cann_release_resources(ctx, acl_input_roll_tensor, acl_input_tensor); // init [-1, 1, -1, 1, ...] minus_one_scale_buffer = minus_one_scale_allocator.get(); - int64_t minus_one_ne[4] = {src0->ne[0], 1, 1, 1}; - size_t minus_one_nb[GGML_MAX_DIMS]; + int64_t minus_one_ne[4] = { src0->ne[0], 1, 1, 1 }; + size_t minus_one_nb[GGML_MAX_DIMS]; minus_one_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } - acl_minus_one_tensor = aclnn_values( - ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], - minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); - int64_t dim = 3; - int64_t* index = new int64_t[src0->ne[0]]; + acl_minus_one_tensor = aclnn_values(ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], minus_one_ne, + GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); + int64_t dim = 3; + int64_t * index = new int64_t[src0->ne[0]]; for (int i = 0; i < src0->ne[0]; i++) { index[i] = i / 2 * 2; } int64_t index_num = src0->ne[0]; - float value = -1; - aclnn_index_fill_tensor(ctx, acl_minus_one_tensor, dim, index, - index_num, value); + float value = -1; + aclnn_index_fill_tensor(ctx, acl_minus_one_tensor, dim, index, index_num, value); } else { // roll input: [q0,q1,q2,...] 
-> // [q_half,q_half+1,...,q_end,q0,q1,...q_half-1] input_roll_buffer = roll_allocator.get(); - aclTensor* acl_input_roll_tensor = ggml_cann_create_tensor( - input_roll_buffer, ggml_cann_type_mapping(src0->type), - ggml_type_size(src0->type), src0->ne, src0->nb, GGML_MAX_DIMS); - aclTensor* acl_input_tensor = ggml_cann_create_tensor(src0); + aclTensor * acl_input_roll_tensor = + ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), + src0->ne, src0->nb, GGML_MAX_DIMS); + aclTensor * acl_input_tensor = ggml_cann_create_tensor(src0); - int64_t shifts[] = {src0->ne[0] / 2}; - int64_t dims[] = {3}; + int64_t shifts[] = { src0->ne[0] / 2 }; + int64_t dims[] = { 3 }; aclnn_roll(ctx, acl_input_tensor, acl_input_roll_tensor, shifts, dims); ggml_cann_release_resources(ctx, acl_input_roll_tensor, acl_input_tensor); // init [-1, -1, -1, 1, 1,1,...] - minus_one_scale_buffer = minus_one_scale_allocator.get(); - int64_t minus_one_ne[4] = {src0->ne[0], 1, 1, 1}; - size_t minus_one_nb[GGML_MAX_DIMS]; + minus_one_scale_buffer = minus_one_scale_allocator.get(); + int64_t minus_one_ne[4] = { src0->ne[0], 1, 1, 1 }; + size_t minus_one_nb[GGML_MAX_DIMS]; minus_one_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } - acl_minus_one_tensor = aclnn_values( - ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], - minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); + acl_minus_one_tensor = aclnn_values(ctx, minus_one_scale_buffer, sizeof(float) * src0->ne[0], minus_one_ne, + GGML_MAX_DIMS, ACL_FLOAT, sizeof(float), 1); // -1 * first half - int64_t first_half_ne[4] = {src0->ne[0] / 2, 1, 1, 1}; - size_t first_half_nb[GGML_MAX_DIMS]; + int64_t first_half_ne[4] = { src0->ne[0] / 2, 1, 1, 1 }; + size_t first_half_nb[GGML_MAX_DIMS]; first_half_nb[0] = sizeof(float); for (int i = 1; i < GGML_MAX_DIMS; i++) { first_half_nb[i] = first_half_nb[i - 1] * first_half_ne[i - 1]; } - aclTensor* acl_first_half_tensor = ggml_cann_create_tensor( - minus_one_scale_buffer, ACL_FLOAT, sizeof(float), first_half_ne, - first_half_nb, GGML_MAX_DIMS); - bool inplace = true; - float scale = -1; + aclTensor * acl_first_half_tensor = ggml_cann_create_tensor(minus_one_scale_buffer, ACL_FLOAT, sizeof(float), + first_half_ne, first_half_nb, GGML_MAX_DIMS); + bool inplace = true; + float scale = -1; aclnn_muls(ctx, acl_first_half_tensor, scale, nullptr, inplace); ggml_cann_release_resources(ctx, acl_first_half_tensor); } @@ -2667,30 +2563,27 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { GGML_ASSERT(n_dims == src0->ne[0]); // input * scale - ggml_cann_pool_alloc roll_mul_scale_allocator(ctx.pool(), - ggml_nbytes(src0)); - void* input_roll_mul_scale_buffer = roll_mul_scale_allocator.get(); - size_t input_nb[GGML_MAX_DIMS]; + ggml_cann_pool_alloc roll_mul_scale_allocator(ctx.pool(), ggml_nbytes(src0)); + void * input_roll_mul_scale_buffer = roll_mul_scale_allocator.get(); + size_t input_nb[GGML_MAX_DIMS]; input_nb[0] = ggml_type_size(src0->type); for (int i = 1; i < GGML_MAX_DIMS; i++) { input_nb[i] = input_nb[i - 1] * src0->ne[i - 1]; } - aclTensor* acl_input_roll_mul_scale_tensor = ggml_cann_create_tensor( - input_roll_mul_scale_buffer, ggml_cann_type_mapping(src0->type), - ggml_type_size(src0->type), src0->ne, input_nb, GGML_MAX_DIMS); - aclTensor* acl_input_roll_reshape_tensor = ggml_cann_create_tensor( - input_roll_buffer, ggml_cann_type_mapping(src0->type), - 
ggml_type_size(src0->type), src0->ne, input_nb, GGML_MAX_DIMS); + aclTensor * acl_input_roll_mul_scale_tensor = + ggml_cann_create_tensor(input_roll_mul_scale_buffer, ggml_cann_type_mapping(src0->type), + ggml_type_size(src0->type), src0->ne, input_nb, GGML_MAX_DIMS); + aclTensor * acl_input_roll_reshape_tensor = + ggml_cann_create_tensor(input_roll_buffer, ggml_cann_type_mapping(src0->type), ggml_type_size(src0->type), + src0->ne, input_nb, GGML_MAX_DIMS); - aclnn_mul(ctx, acl_input_roll_reshape_tensor, acl_minus_one_tensor, - acl_input_roll_mul_scale_tensor); + aclnn_mul(ctx, acl_input_roll_reshape_tensor, acl_minus_one_tensor, acl_input_roll_mul_scale_tensor); // output - void* output_fp32_buffer; + void * output_fp32_buffer; if (src0->type == GGML_TYPE_F32) { aclnn_mul(ctx, acl_src, acl_cos_reshape_tensor); - aclnn_mul(ctx, acl_input_roll_mul_scale_tensor, - acl_sin_reshape_tensor); + aclnn_mul(ctx, acl_input_roll_mul_scale_tensor, acl_sin_reshape_tensor); aclnn_add(ctx, acl_src, acl_input_roll_mul_scale_tensor, acl_dst); // TODO: ne0 != n_dims in mode2 } else if (src0->type == GGML_TYPE_F16) { @@ -2699,36 +2592,27 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { for (int i = 1; i < GGML_MAX_DIMS; i++) { input_fp32_nb[i] = input_fp32_nb[i - 1] * dst->ne[i - 1]; } - ggml_cann_pool_alloc fp32_allocator1( - ctx.pool(), ggml_nelements(dst) * sizeof(float)); - void* input_fp32_buffer1 = fp32_allocator1.get(); - aclTensor* input_fp32_tensor1 = ggml_cann_create_tensor( - input_fp32_buffer1, ACL_FLOAT, sizeof(float), dst->ne, - input_fp32_nb, GGML_MAX_DIMS); - ggml_cann_pool_alloc fp32_allocator2( - ctx.pool(), ggml_nelements(dst) * sizeof(float)); - void* input_fp32_buffer2 = fp32_allocator2.get(); - aclTensor* input_fp32_tensor2 = ggml_cann_create_tensor( - input_fp32_buffer2, ACL_FLOAT, sizeof(float), dst->ne, - input_fp32_nb, GGML_MAX_DIMS); - - ggml_cann_pool_alloc fp32_allocator( - ctx.pool(), ggml_nelements(dst) * sizeof(float)); - output_fp32_buffer = fp32_allocator.get(); - aclTensor* output_fp32_tensor = ggml_cann_create_tensor( - output_fp32_buffer, ACL_FLOAT, sizeof(float), dst->ne, - input_fp32_nb, GGML_MAX_DIMS); + ggml_cann_pool_alloc fp32_allocator1(ctx.pool(), ggml_nelements(dst) * sizeof(float)); + void * input_fp32_buffer1 = fp32_allocator1.get(); + aclTensor * input_fp32_tensor1 = ggml_cann_create_tensor(input_fp32_buffer1, ACL_FLOAT, sizeof(float), dst->ne, + input_fp32_nb, GGML_MAX_DIMS); + ggml_cann_pool_alloc fp32_allocator2(ctx.pool(), ggml_nelements(dst) * sizeof(float)); + void * input_fp32_buffer2 = fp32_allocator2.get(); + aclTensor * input_fp32_tensor2 = ggml_cann_create_tensor(input_fp32_buffer2, ACL_FLOAT, sizeof(float), dst->ne, + input_fp32_nb, GGML_MAX_DIMS); + + ggml_cann_pool_alloc fp32_allocator(ctx.pool(), ggml_nelements(dst) * sizeof(float)); + output_fp32_buffer = fp32_allocator.get(); + aclTensor * output_fp32_tensor = ggml_cann_create_tensor(output_fp32_buffer, ACL_FLOAT, sizeof(float), dst->ne, + input_fp32_nb, GGML_MAX_DIMS); aclnn_mul(ctx, acl_src, acl_cos_reshape_tensor, input_fp32_tensor1); - aclnn_mul(ctx, acl_input_roll_mul_scale_tensor, acl_sin_reshape_tensor, - input_fp32_tensor2); - aclnn_add(ctx, input_fp32_tensor1, input_fp32_tensor2, - output_fp32_tensor); + aclnn_mul(ctx, acl_input_roll_mul_scale_tensor, acl_sin_reshape_tensor, input_fp32_tensor2); + aclnn_add(ctx, input_fp32_tensor1, input_fp32_tensor2, output_fp32_tensor); aclnn_cast(ctx, output_fp32_tensor, acl_dst, ACL_FLOAT16); - 
ggml_cann_release_resources(ctx, input_fp32_tensor1, input_fp32_tensor2, - output_fp32_tensor, acl_sin_reshape_tensor, - acl_minus_one_tensor, acl_input_roll_mul_scale_tensor, - acl_input_roll_reshape_tensor, acl_src); + ggml_cann_release_resources(ctx, input_fp32_tensor1, input_fp32_tensor2, output_fp32_tensor, + acl_sin_reshape_tensor, acl_minus_one_tensor, acl_input_roll_mul_scale_tensor, + acl_input_roll_reshape_tensor, acl_src); } return; #endif @@ -2737,155 +2621,146 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { int64_t acl_mode = mode == 0 ? 1 : mode; switch (src0->type) { - case GGML_TYPE_F32: { - GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src, - acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode, acl_dst); - break; - } - case GGML_TYPE_F16: { - ggml_cann_pool_alloc src_trans_allocator( - ctx.pool(), ggml_nelements(src0) * sizeof(float)); - void* src_trans_buffer = src_trans_allocator.get(); - ggml_cann_pool_alloc dst_trans_allocator( - ctx.pool(), ggml_nelements(dst) * sizeof(float)); - void* dst_trans_buffer = dst_trans_allocator.get(); - - size_t src_trans_nb[GGML_MAX_DIMS]; - src_trans_nb[0] = sizeof(float); - for (int i = 1; i < GGML_MAX_DIMS; i++) { - src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; + case GGML_TYPE_F32: + { + GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src, acl_cos_reshape_tensor, + acl_sin_reshape_tensor, acl_mode, acl_dst); + break; } + case GGML_TYPE_F16: + { + ggml_cann_pool_alloc src_trans_allocator(ctx.pool(), ggml_nelements(src0) * sizeof(float)); + void * src_trans_buffer = src_trans_allocator.get(); + ggml_cann_pool_alloc dst_trans_allocator(ctx.pool(), ggml_nelements(dst) * sizeof(float)); + void * dst_trans_buffer = dst_trans_allocator.get(); - aclTensor* acl_src_trans_tensor = ggml_cann_create_tensor( - src_trans_buffer, ACL_FLOAT, sizeof(float), src0->ne, src_trans_nb, - GGML_MAX_DIMS); - aclTensor* acl_dst_trans_tensor = ggml_cann_create_tensor( - dst_trans_buffer, ACL_FLOAT, sizeof(float), dst->ne, src_trans_nb, - GGML_MAX_DIMS); + size_t src_trans_nb[GGML_MAX_DIMS]; + src_trans_nb[0] = sizeof(float); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + src_trans_nb[i] = src_trans_nb[i - 1] * src0->ne[i - 1]; + } - aclnn_cast(ctx, acl_src, acl_src_trans_tensor, ACL_FLOAT); + aclTensor * acl_src_trans_tensor = ggml_cann_create_tensor(src_trans_buffer, ACL_FLOAT, sizeof(float), + src0->ne, src_trans_nb, GGML_MAX_DIMS); + aclTensor * acl_dst_trans_tensor = ggml_cann_create_tensor(dst_trans_buffer, ACL_FLOAT, sizeof(float), + dst->ne, src_trans_nb, GGML_MAX_DIMS); - GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_trans_tensor, - acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode, - acl_dst_trans_tensor); + aclnn_cast(ctx, acl_src, acl_src_trans_tensor, ACL_FLOAT); - aclnn_cast(ctx, acl_dst_trans_tensor, acl_dst, ACL_FLOAT16); + GGML_CANN_CALL_ACLNN_OP(ctx, RotaryPositionEmbedding, acl_src_trans_tensor, acl_cos_reshape_tensor, + acl_sin_reshape_tensor, acl_mode, acl_dst_trans_tensor); - ggml_cann_release_resources(ctx, acl_src_trans_tensor, - acl_dst_trans_tensor); - break; - } + aclnn_cast(ctx, acl_dst_trans_tensor, acl_dst, ACL_FLOAT16); + + ggml_cann_release_resources(ctx, acl_src_trans_tensor, acl_dst_trans_tensor); + break; + } default: GGML_ABORT("Unsupported tensor type for GGML_OP_ROPE"); break; } - ggml_cann_release_resources(ctx, acl_cos_reshape_tensor, - acl_sin_reshape_tensor, acl_src, acl_dst); + ggml_cann_release_resources(ctx, acl_cos_reshape_tensor, 
acl_sin_reshape_tensor, acl_src, acl_dst); } - - void ggml_cann_argmax(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_argmax(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3); GGML_CANN_CALL_ACLNN_OP(ctx, ArgMax, acl_src, 3, false, acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst); } -void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_conv_transpose_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; // stride - int64_t s0 = ((const int32_t*)(dst->op_params))[0]; + int64_t s0 = ((const int32_t *) (dst->op_params))[0]; - aclTensor* acl_input = ggml_cann_create_tensor(src1, src1->ne, src1->nb, 3, ACL_FORMAT_NCL); - aclTensor* acl_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3, ACL_FORMAT_NCL); - aclTensor* acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3, ACL_FORMAT_NCL); + aclTensor * acl_input = ggml_cann_create_tensor(src1, src1->ne, src1->nb, 3, ACL_FORMAT_NCL); + aclTensor * acl_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3, ACL_FORMAT_NCL); + aclTensor * acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3, ACL_FORMAT_NCL); int64_t strideVal[1]; - strideVal[0] = s0; - aclIntArray *stride = aclCreateIntArray(strideVal, 1); - int64_t paddingVal[] = {0}; - aclIntArray *padding = aclCreateIntArray(paddingVal, 1); - int64_t dilationVal[] = {1}; - aclIntArray *dilation = aclCreateIntArray(dilationVal, 1); - int8_t cubeMathType = 0; + strideVal[0] = s0; + aclIntArray * stride = aclCreateIntArray(strideVal, 1); + int64_t paddingVal[] = { 0 }; + aclIntArray * padding = aclCreateIntArray(paddingVal, 1); + int64_t dilationVal[] = { 1 }; + aclIntArray * dilation = aclCreateIntArray(dilationVal, 1); + int8_t cubeMathType = 0; #ifdef ASCEND_310P cubeMathType = 1; #endif - GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_input, acl_weight, nullptr, stride, - padding, dilation, true, padding, 1, acl_dst, cubeMathType); + GGML_CANN_CALL_ACLNN_OP(ctx, Convolution, acl_input, acl_weight, nullptr, stride, padding, dilation, true, padding, + 1, acl_dst, cubeMathType); ggml_cann_release_resources(ctx, acl_weight, acl_dst, stride, padding, dilation); } -void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_elu(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; - aclTensor* acl_input = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_input = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - float alphaValue = 1.0f; - aclScalar* alpha = nullptr; - alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + float alphaValue = 1.0f; + aclScalar * alpha = nullptr; + alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); - GGML_CANN_CALL_ACLNN_OP(ctx, Elu, acl_input, alpha, alpha, alpha, - acl_dst); + GGML_CANN_CALL_ACLNN_OP(ctx, Elu, acl_input, alpha, alpha, alpha, acl_dst); ggml_cann_release_resources(ctx, acl_input, acl_dst, alpha); } -void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_mean(ggml_backend_cann_context & ctx, 
ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - int64_t reduceDimValue[] = {3}; - aclIntArray* reduceDim = aclCreateIntArray(reduceDimValue, 1); - bool keepDim = true; + int64_t reduceDimValue[] = { 3 }; + aclIntArray * reduceDim = aclCreateIntArray(reduceDimValue, 1); + bool keepDim = true; GGML_CANN_CALL_ACLNN_OP(ctx, Mean, acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst, reduceDim); } -void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){ - ggml_tensor * src0 = dst->src[0]; - int32_t *opts = (int32_t *) dst->op_params; - int64_t paddingsArray[2] = {opts[0], opts[1]}; - aclIntArray* paddings = aclCreateIntArray(paddingsArray, 2); +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + int32_t * opts = (int32_t *) dst->op_params; + int64_t paddingsArray[2] = { opts[0], opts[1] }; + aclIntArray * paddings = aclCreateIntArray(paddingsArray, 2); for (int64_t i = 0; i < src0->ne[3]; i++) { - aclTensor* acl_src = ggml_cann_create_tensor( - (char*)src0->data + i * src0->ne[3], - ggml_cann_type_mapping(src0->type), ggml_element_size(src0), - src0->ne, src0->nb, 3); + aclTensor * acl_src = + ggml_cann_create_tensor((char *) src0->data + i * src0->ne[3], ggml_cann_type_mapping(src0->type), + ggml_element_size(src0), src0->ne, src0->nb, 3); - aclTensor* acl_dst = ggml_cann_create_tensor( - (char*)dst->data + i * src0->ne[3], - ggml_cann_type_mapping(dst->type), ggml_element_size(dst), - dst->ne, dst->nb, 3); + aclTensor * acl_dst = + ggml_cann_create_tensor((char *) dst->data + i * src0->ne[3], ggml_cann_type_mapping(dst->type), + ggml_element_size(dst), dst->ne, dst->nb, 3); - GGML_CANN_CALL_ACLNN_OP(ctx, ReflectionPad1d, acl_src, paddings, acl_dst); + GGML_CANN_CALL_ACLNN_OP(ctx, ReflectionPad1d, acl_src, paddings, acl_dst); - ggml_cann_release_resources(ctx, acl_src, acl_dst); + ggml_cann_release_resources(ctx, acl_src, acl_dst); } ggml_cann_release_resources(ctx, paddings); } -void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_count_equal(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; ggml_tensor * src1 = dst->src[1]; - aclTensor* acl_self = ggml_cann_create_tensor(src0); - aclTensor* acl_other = ggml_cann_create_tensor(src1); + aclTensor * acl_self = ggml_cann_create_tensor(src0); + aclTensor * acl_other = ggml_cann_create_tensor(src1); GGML_CANN_CALL_ACLNN_OP(ctx, InplaceEqTensor, acl_self, acl_other); @@ -2894,15 +2769,15 @@ void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst){ ggml_cann_release_resources(ctx, acl_self, acl_other); } -void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst){ +void ggml_cann_step(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src0 = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src0); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); - float alphaValue = 0.0f; - aclScalar* alpha = nullptr; - alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + float alphaValue = 0.0f; + aclScalar * alpha = nullptr; + alpha = 
aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); GGML_CANN_CALL_ACLNN_OP(ctx, GtScalar, acl_src, alpha, acl_dst); @@ -2927,7 +2802,7 @@ void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst){ * @note This function assumes floating-point data types and is designed for * MoE architectures, possibly involving sparse expert routing. */ -static void ggml_cann_mul_mat_id_fp(ggml_backend_cann_context& ctx, ggml_tensor* dst) { +static void ggml_cann_mul_mat_id_fp(ggml_backend_cann_context & ctx, ggml_tensor * dst) { //dst [M, K, N, 1] ggml_tensor * src0 = dst->src[0]; //src0 [D, M, A, 1] -> [D, M, K, 1] ggml_tensor * src1 = dst->src[1]; //src1 [D, B, N, 1], B = K or B = 1 -> [D, 1, K, 1] @@ -2941,36 +2816,42 @@ static void ggml_cann_mul_mat_id_fp(ggml_backend_cann_context& ctx, ggml_tensor* GGML_ASSERT(batch == ids->ne[1]); ggml_cann_pool_alloc export_allocator(ctx.pool(), src0->ne[0] * src0->ne[1] * ids->ne[0] * ggml_element_size(src0)); - void* export_ptr = export_allocator.get(); + void * export_ptr = export_allocator.get(); for (int64_t i = 0; i < batch; i++) { - aclTensor *select_index = ggml_cann_create_tensor(ids, ids->ne, ids->nb, 1, ACL_FORMAT_ND, i * ids->nb[1]); - aclTensor *export_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3); + aclTensor * select_index = ggml_cann_create_tensor(ids, ids->ne, ids->nb, 1, ACL_FORMAT_ND, i * ids->nb[1]); + aclTensor * export_weight = ggml_cann_create_tensor(src0, src0->ne, src0->nb, 3); - int64_t select_export_ne[] = {src0->ne[0], src0->ne[1], ids->ne[0]}; - size_t select_export_nb[3]; + int64_t select_export_ne[] = { src0->ne[0], src0->ne[1], ids->ne[0] }; + size_t select_export_nb[3]; select_export_nb[0] = src0->nb[0]; - for (int k = 1;k < 3; k++) { - select_export_nb[k] = select_export_nb[k-1] * select_export_ne[k-1]; + for (int k = 1; k < 3; k++) { + select_export_nb[k] = select_export_nb[k - 1] * select_export_ne[k - 1]; } - aclTensor *select_export = ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), select_export_ne, select_export_nb, 3); + aclTensor * select_export = + ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), + select_export_ne, select_export_nb, 3); GGML_CANN_CALL_ACLNN_OP(ctx, IndexSelect, export_weight, 0, select_index, select_export); - int64_t select_transpose_ne[] = {select_export_ne[1], select_export_ne[0], select_export_ne[2]}; - size_t select_transpose_nb[] = {select_export_nb[1], select_export_nb[0], select_export_nb[2]}; - aclTensor *select_export_transpose = ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), select_transpose_ne, select_transpose_nb, 3); + int64_t select_transpose_ne[] = { select_export_ne[1], select_export_ne[0], select_export_ne[2] }; + size_t select_transpose_nb[] = { select_export_nb[1], select_export_nb[0], select_export_nb[2] }; + aclTensor * select_export_transpose = + ggml_cann_create_tensor(export_ptr, ggml_cann_type_mapping(src0->type), ggml_element_size(src0), + select_transpose_ne, select_transpose_nb, 3); - int64_t active_tensor_ne[] = {src1->ne[0], 1, src1->ne[1]}; - size_t active_tensor_nb[] = {src1->nb[0], src1->nb[1], src1->nb[1]}; - aclTensor *active_tensor = ggml_cann_create_tensor(src1, active_tensor_ne, active_tensor_nb, 3, ACL_FORMAT_ND, i * src1->nb[2]); + int64_t active_tensor_ne[] = { src1->ne[0], 1, src1->ne[1] }; + size_t active_tensor_nb[] = { src1->nb[0], src1->nb[1], src1->nb[1] }; + aclTensor * active_tensor 
= + ggml_cann_create_tensor(src1, active_tensor_ne, active_tensor_nb, 3, ACL_FORMAT_ND, i * src1->nb[2]); - int64_t dst_ne[] = {dst->ne[0], 1, dst->ne[1]}; - size_t dst_nb[] = {dst->nb[0], dst->nb[1], dst->nb[1]}; - aclTensor *acl_dst = ggml_cann_create_tensor(dst, dst_ne,dst_nb, 3, ACL_FORMAT_ND, i * dst->nb[2]); + int64_t dst_ne[] = { dst->ne[0], 1, dst->ne[1] }; + size_t dst_nb[] = { dst->nb[0], dst->nb[1], dst->nb[1] }; + aclTensor * acl_dst = ggml_cann_create_tensor(dst, dst_ne, dst_nb, 3, ACL_FORMAT_ND, i * dst->nb[2]); GGML_CANN_CALL_ACLNN_OP(ctx, BatchMatMul, active_tensor, select_export_transpose, acl_dst, 2); - ggml_cann_release_resources(ctx, select_index, export_weight, select_export, active_tensor, acl_dst, select_export_transpose); + ggml_cann_release_resources(ctx, select_index, export_weight, select_export, active_tensor, acl_dst, + select_export_transpose); } } @@ -2997,7 +2878,7 @@ static void ggml_cann_mul_mat_id_fp(ggml_backend_cann_context& ctx, ggml_tensor* * @note This function assumes quantized data types and is designed for * MoE architectures with potential sparse expert routing. */ -static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tensor* dst) { +static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context & ctx, ggml_tensor * dst) { // TODO: Use aclnnGroupedMatMul //dst [M, K, N, 1] ggml_tensor * src0 = dst->src[0]; //src0 [D, M, A, 1] @@ -3007,24 +2888,23 @@ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tens GGML_TENSOR_BINARY_OP_LOCALS // copy index from npu to cpu - int64_t n_as = ne02; // A - int64_t n_ids = ids->ne[0]; // K + int64_t n_as = ne02; // A + int64_t n_ids = ids->ne[0]; // K std::vector ids_host(ggml_nbytes(ids)); - ggml_cann_async_memcpy(ctx, ids_host.data(), ids->data, ggml_nbytes(ids), - ACL_MEMCPY_DEVICE_TO_HOST); + ggml_cann_async_memcpy(ctx, ids_host.data(), ids->data, ggml_nbytes(ids), ACL_MEMCPY_DEVICE_TO_HOST); ACL_CHECK(aclrtSynchronizeStream(ctx.stream())); char * src0_original = (char *) src0->data; char * src1_original = (char *) src1->data; - char * dst_original = (char *) dst->data; + char * dst_original = (char *) dst->data; ggml_tensor src0_row = *src0; ggml_tensor src1_row = *src1; - ggml_tensor dst_row = *dst; + ggml_tensor dst_row = *dst; const enum ggml_type type = dst->src[0]->type; - float weight_elem_size; + float weight_elem_size; if (type == GGML_TYPE_Q4_0) { weight_elem_size = float(sizeof(uint8_t)) / 2; } else if (type == GGML_TYPE_Q8_0) { @@ -3034,18 +2914,18 @@ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tens } // src0_row [D, M, 1, 1] weight without permute - src0_row.ne[2] = 1; - src0_row.ne[3] = 1; - src0_row.nb[0] = weight_elem_size; - src0_row.nb[1] = weight_elem_size * ne00; - src0_row.nb[2] = weight_elem_size * ne00; - src0_row.nb[3] = weight_elem_size * ne00; + src0_row.ne[2] = 1; + src0_row.ne[3] = 1; + src0_row.nb[0] = weight_elem_size; + src0_row.nb[1] = weight_elem_size * ne00; + src0_row.nb[2] = weight_elem_size * ne00; + src0_row.nb[3] = weight_elem_size * ne00; size_t weight_stride = ne00 * ne01 * weight_elem_size; - size_t weight_size = weight_stride * ne02 * ne03; + size_t weight_size = weight_stride * ne02 * ne03; // scale [D, M, 1, 1] -> scale && permute size_t scale_elem_size = sizeof(uint16_t); - size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; + size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; // src1_row [D, 1, 1, 1] -> input src1_row.ne[1] = 1; @@ 
-3063,11 +2943,11 @@ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tens //create weight for one row ggml_cann_pool_alloc weight_allocator(ctx.pool()); - void* weight_buffer = weight_allocator.alloc(nb02); + void * weight_buffer = weight_allocator.alloc(nb02); for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) { for (int64_t id = 0; id < n_ids; id++) { // expert index - int32_t i02 = *(int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]); + int32_t i02 = *(int32_t *) (ids_host.data() + iid1 * ids->nb[1] + id * ids->nb[0]); GGML_ASSERT(i02 >= 0 && i02 < n_as); // If B = 1 (broadcast), always use 0; otherwise, use id. @@ -3077,21 +2957,19 @@ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tens int64_t i1 = id; int64_t i2 = i12; - void* src0_tmp_ptr = src0_original + i02*weight_stride; - void* scale_tmp_ptr = src0_original + weight_size + i02*scale_stride; - void* src1_tmp_ptr = src1_original + i11*nb11 + i12*nb12; - void* dst_tmp_ptr = dst_original + i1*nb1 + i2*nb2; + void * src0_tmp_ptr = src0_original + i02 * weight_stride; + void * scale_tmp_ptr = src0_original + weight_size + i02 * scale_stride; + void * src1_tmp_ptr = src1_original + i11 * nb11 + i12 * nb12; + void * dst_tmp_ptr = dst_original + i1 * nb1 + i2 * nb2; // mem cpy - ggml_cann_async_memcpy(ctx, weight_buffer, src0_tmp_ptr, weight_stride, - ACL_MEMCPY_DEVICE_TO_DEVICE); - void* scale_buffer = (char*)weight_buffer + weight_stride; - ggml_cann_async_memcpy(ctx, scale_buffer, scale_tmp_ptr, scale_stride, - ACL_MEMCPY_DEVICE_TO_DEVICE); - - src0_row.data = weight_buffer; - src1_row.data = src1_tmp_ptr; - dst_row.data = dst_tmp_ptr; + ggml_cann_async_memcpy(ctx, weight_buffer, src0_tmp_ptr, weight_stride, ACL_MEMCPY_DEVICE_TO_DEVICE); + void * scale_buffer = (char *) weight_buffer + weight_stride; + ggml_cann_async_memcpy(ctx, scale_buffer, scale_tmp_ptr, scale_stride, ACL_MEMCPY_DEVICE_TO_DEVICE); + + src0_row.data = weight_buffer; + src1_row.data = src1_tmp_ptr; + dst_row.data = dst_tmp_ptr; dst_row.src[0] = &src0_row; dst_row.src[1] = &src1_row; @@ -3101,7 +2979,7 @@ static void ggml_cann_mul_mat_id_quant(ggml_backend_cann_context& ctx, ggml_tens return; } -void ggml_cann_mul_mat_id(ggml_backend_cann_context& ctx, ggml_tensor* dst) { +void ggml_cann_mul_mat_id(ggml_backend_cann_context & ctx, ggml_tensor * dst) { const enum ggml_type type = dst->src[0]->type; switch (type) { case GGML_TYPE_F32: @@ -3118,12 +2996,11 @@ void ggml_cann_mul_mat_id(ggml_backend_cann_context& ctx, ggml_tensor* dst) { } } -void ggml_cann_flash_attn_ext(ggml_backend_cann_context& ctx, ggml_tensor* dst){ - - ggml_tensor* src0 = dst->src[0]; // q, fp32 | B, N, S, D (uncont) -> B, S, N, D (cont) - ggml_tensor* src1 = dst->src[1]; // k, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) - ggml_tensor* src2 = dst->src[2]; // v, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) - ggml_tensor* src3 = dst->src[3]; // mask, fp16 +void ggml_cann_flash_attn_ext(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; // q, fp32 | B, N, S, D (uncont) -> B, S, N, D (cont) + ggml_tensor * src1 = dst->src[1]; // k, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) + ggml_tensor * src2 = dst->src[2]; // v, fp16 | B, N, S, D (uncont) -> B, S, N, D (cont) + ggml_tensor * src3 = dst->src[3]; // mask, fp16 // B, N, S, D (uncont) -> B, S, N, D (cont) int64_t src0_bsnd_ne[GGML_MAX_DIMS]; @@ -3139,107 +3016,96 @@ void ggml_cann_flash_attn_ext(ggml_backend_cann_context& ctx, 
ggml_tensor* dst){ size_t src2_bsnd_nb[GGML_MAX_DIMS]; memcpy(src2_bsnd_nb, src2->nb, GGML_MAX_DIMS * sizeof(size_t)); - auto transpose12 = [](int64_t* ne, size_t* nb) { + auto transpose12 = [](int64_t * ne, size_t * nb) { int64_t ne_tmp = ne[1]; size_t nb_tmp = nb[1]; - ne[1] = ne[2]; - nb[1] = nb[2]; - ne[2] = ne_tmp; - nb[2] = nb_tmp; + ne[1] = ne[2]; + nb[1] = nb[2]; + ne[2] = ne_tmp; + nb[2] = nb_tmp; }; transpose12(src0_bsnd_ne, src0_bsnd_nb); transpose12(src1_bsnd_ne, src1_bsnd_nb); transpose12(src2_bsnd_ne, src2_bsnd_nb); - float maxBias = 0.0f; - float scaleValue = 1.0f; + float maxBias = 0.0f; + float scaleValue = 1.0f; float logitSoftcap = 0.0f; - memcpy(&scaleValue, (float*)dst->op_params + 0, sizeof(float)); - memcpy(&maxBias, (float*)dst->op_params + 1, sizeof(float)); - memcpy(&logitSoftcap, (float*)dst->op_params + 2, sizeof(float)); + memcpy(&scaleValue, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&maxBias, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&logitSoftcap, (float *) dst->op_params + 2, sizeof(float)); - if(logitSoftcap == 0.0f){ + if (logitSoftcap == 0.0f) { size_t faElemSize = sizeof(uint16_t); - auto faDataType = ACL_FLOAT16; //ACL_BF16; + auto faDataType = ACL_FLOAT16; //ACL_BF16; - aclTensor* acl_src0_f16_tensor = nullptr; - aclTensor* acl_src1_f16_tensor = nullptr; - aclTensor* acl_src2_f16_tensor = nullptr; + aclTensor * acl_src0_f16_tensor = nullptr; + aclTensor * acl_src1_f16_tensor = nullptr; + aclTensor * acl_src2_f16_tensor = nullptr; // Step 1: cast the src0 (Query) to fp16 if needed ggml_cann_pool_alloc src0_f16_allocator(ctx.pool()); - void* src0_f16_buffer = nullptr; + void * src0_f16_buffer = nullptr; - if(ggml_cann_type_mapping(src0->type) != faDataType){ - aclTensor* acl_src0_f32_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, - src0_bsnd_nb, GGML_MAX_DIMS); - src0_f16_buffer = src0_f16_allocator.alloc( - ggml_nelements(src0) * faElemSize); + if (ggml_cann_type_mapping(src0->type) != faDataType) { + aclTensor * acl_src0_f32_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, src0_bsnd_nb, GGML_MAX_DIMS); + src0_f16_buffer = src0_f16_allocator.alloc(ggml_nelements(src0) * faElemSize); - int64_t* src0_f16_ne = src0_bsnd_ne; - size_t src0_f16_nb[GGML_MAX_DIMS]; + int64_t * src0_f16_ne = src0_bsnd_ne; + size_t src0_f16_nb[GGML_MAX_DIMS]; src0_f16_nb[0] = sizeof(uint16_t); - for(int i = 1; i < GGML_MAX_DIMS; ++i){ + for (int i = 1; i < GGML_MAX_DIMS; ++i) { src0_f16_nb[i] = src0_f16_nb[i - 1] * src0_f16_ne[i - 1]; } - acl_src0_f16_tensor = ggml_cann_create_tensor( - src0_f16_buffer, faDataType, faElemSize, - src0_f16_ne, src0_f16_nb, GGML_MAX_DIMS - ); + acl_src0_f16_tensor = ggml_cann_create_tensor(src0_f16_buffer, faDataType, faElemSize, src0_f16_ne, + src0_f16_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src0_f32_tensor, acl_src0_f16_tensor, faDataType); ggml_cann_release_resources(ctx, acl_src0_f32_tensor); - }else{ - acl_src0_f16_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, - src0_bsnd_nb, GGML_MAX_DIMS); + } else { + acl_src0_f16_tensor = ggml_cann_create_tensor(src0, src0_bsnd_ne, src0_bsnd_nb, GGML_MAX_DIMS); } // Step 2: create the acl tensors for src1 (Key), src2 (Value), // and the direct output from FusedInferAttention - acl_src1_f16_tensor = ggml_cann_create_tensor(src1, src1_bsnd_ne, - src1_bsnd_nb, GGML_MAX_DIMS); - acl_src2_f16_tensor = ggml_cann_create_tensor(src2, src2_bsnd_ne, - src2_bsnd_nb, GGML_MAX_DIMS); + acl_src1_f16_tensor = ggml_cann_create_tensor(src1, src1_bsnd_ne, src1_bsnd_nb, 
GGML_MAX_DIMS); + acl_src2_f16_tensor = ggml_cann_create_tensor(src2, src2_bsnd_ne, src2_bsnd_nb, GGML_MAX_DIMS); // Step 3: create the PSEShift tensor if needed // this tensor is considered as mask (f16) in the llama.cpp - aclTensor* bcast_pse_tensor = nullptr; + aclTensor * bcast_pse_tensor = nullptr; ggml_cann_pool_alloc bcast_pse_allocator(ctx.pool()); - if(src3 != nullptr){ + if (src3 != nullptr) { // Construct the truncated pse tensor (common for prefill/decode) int64_t trunc_pse_ne[GGML_MAX_DIMS] = { - src3->ne[0], // D - src0->ne[1], // S (number of Q tokens) - src3->ne[2], // mask N - src3->ne[3] // B + src3->ne[0], // D + src0->ne[1], // S (number of Q tokens) + src3->ne[2], // mask N + src3->ne[3] // B }; - size_t* trunc_pse_nb = src3->nb; + size_t * trunc_pse_nb = src3->nb; - aclTensor* acl_mask_f16_trunc_tensor = ggml_cann_create_tensor( - src3->data, ACL_FLOAT16, sizeof(uint16_t), - trunc_pse_ne, trunc_pse_nb, GGML_MAX_DIMS - ); + aclTensor * acl_mask_f16_trunc_tensor = ggml_cann_create_tensor(src3->data, ACL_FLOAT16, sizeof(uint16_t), + trunc_pse_ne, trunc_pse_nb, GGML_MAX_DIMS); int64_t bcast_pse_ne[GGML_MAX_DIMS]; - size_t bcast_pse_nb[GGML_MAX_DIMS]; - bcast_pse_ne[0] = src3->ne[0]; // D - bcast_pse_ne[1] = src0->ne[1]; // S - bcast_pse_ne[2] = src0->ne[2]; // N (num_heads) - bcast_pse_ne[3] = src3->ne[3]; // B + size_t bcast_pse_nb[GGML_MAX_DIMS]; + bcast_pse_ne[0] = src3->ne[0]; // D + bcast_pse_ne[1] = src0->ne[1]; // S + bcast_pse_ne[2] = src0->ne[2]; // N (num_heads) + bcast_pse_ne[3] = src3->ne[3]; // B if (maxBias == 0.0f) { // When maxBias == 0.0f, use nb = 0 reduce once repeat (Qwen2) // Construct the bcast tensor (simulate repeat on the head dimension using stride=0) bcast_pse_nb[0] = sizeof(uint16_t); bcast_pse_nb[1] = bcast_pse_nb[0] * bcast_pse_ne[0]; - bcast_pse_nb[2] = 0; // <---- the head dimension shares the same data + bcast_pse_nb[2] = 0; // <---- the head dimension shares the same data bcast_pse_nb[3] = src3->nb[3]; - bcast_pse_tensor = ggml_cann_create_tensor( - src3->data, ACL_FLOAT16, sizeof(uint16_t), - bcast_pse_ne, bcast_pse_nb, GGML_MAX_DIMS - ); + bcast_pse_tensor = ggml_cann_create_tensor(src3->data, ACL_FLOAT16, sizeof(uint16_t), bcast_pse_ne, + bcast_pse_nb, GGML_MAX_DIMS); ggml_cann_release_resources(ctx, acl_mask_f16_trunc_tensor); } else { @@ -3248,35 +3114,31 @@ void ggml_cann_flash_attn_ext(ggml_backend_cann_context& ctx, ggml_tensor* dst){ bcast_pse_nb[i] = bcast_pse_nb[i - 1] * bcast_pse_ne[i - 1]; } - void* bcast_pse_buffer = bcast_pse_allocator.alloc( - ggml_nelements(src3) * src0->ne[2] * sizeof(uint16_t) - ); + void * bcast_pse_buffer = + bcast_pse_allocator.alloc(ggml_nelements(src3) * src0->ne[2] * sizeof(uint16_t)); - bcast_pse_tensor = ggml_cann_create_tensor( - bcast_pse_buffer, ACL_FLOAT16, sizeof(uint16_t), - bcast_pse_ne, bcast_pse_nb, GGML_MAX_DIMS - ); + bcast_pse_tensor = ggml_cann_create_tensor(bcast_pse_buffer, ACL_FLOAT16, sizeof(uint16_t), + bcast_pse_ne, bcast_pse_nb, GGML_MAX_DIMS); - int64_t repeats[] = {1, src0->ne[2], 1, 1}; + int64_t repeats[] = { 1, src0->ne[2], 1, 1 }; aclnn_repeat(ctx, acl_mask_f16_trunc_tensor, bcast_pse_tensor, repeats); // alibi // Compute the slope if needed. Derived from ggml_cann_softmax(). 
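// The slope buffer filled by aclnn_get_slope() below follows the usual ggml ALiBi
// recurrence (the same formula as the CPU softmax path). A minimal standalone sketch of that
// formula, for illustration only (not part of this backend):
#include <math.h>

// ALiBi slope for head h out of n_heads, given the model's max_bias hyper-parameter.
static float alibi_slope_ref(int h, int n_heads, float max_bias) {
    const int   n_head_log2 = 1 << (int) floorf(log2f((float) n_heads));
    const float m0          = powf(2.0f, -max_bias / n_head_log2);          // base for the first group of heads
    const float m1          = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); // base for the remaining heads
    return h < n_head_log2 ? powf(m0, (float) (h + 1)) : powf(m1, (float) (2 * (h - n_head_log2) + 1));
}
// With max_bias == 0 every slope is 1.0f, which is why the maxBias == 0.0f branch above can
// broadcast the mask with a zero stride instead of materializing a per-head copy.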
- const int64_t n_heads = src0->ne[2]; + const int64_t n_heads = src0->ne[2]; ggml_cann_pool_alloc slope_allocator(ctx.pool(), n_heads * sizeof(uint16_t)); - void* slope_buffer = slope_allocator.get(); + void * slope_buffer = slope_allocator.get(); aclnn_get_slope(ctx, n_heads, slope_buffer, maxBias, GGML_TYPE_F16); - int64_t slope_ne[] = {1, 1, n_heads, 1}; - size_t slope_nb[GGML_MAX_DIMS]; + int64_t slope_ne[] = { 1, 1, n_heads, 1 }; + size_t slope_nb[GGML_MAX_DIMS]; slope_nb[0] = sizeof(uint16_t); - for(int i = 1;ine[2]; // N - int64_t numKeyValueHeads = src1->ne[2]; + int kvTensorNum = 1; + aclTensor * acl_q_tensor = acl_src0_f16_tensor; + aclTensor * acl_k_tensors[] = { acl_src1_f16_tensor }; + aclTensor * acl_v_tensors[] = { acl_src2_f16_tensor }; + aclTensorList * acl_k_tensor_list = aclCreateTensorList(acl_k_tensors, kvTensorNum); + aclTensorList * acl_v_tensor_list = aclCreateTensorList(acl_v_tensors, kvTensorNum); + + int64_t numHeads = src0->ne[2]; // N + int64_t numKeyValueHeads = src1->ne[2]; // double scaleValue = 1 / sqrt(src0->ne[0]); // 1/sqrt(d) - int64_t preTokens = 65535; - int64_t nextTokens = 65535; - char layout[5] = {'B', 'S', 'N', 'D', 0}; - int64_t sparseMode = 0; - int64_t innerPrecise = (src0->ne[1] == 1) ? 0 : 2; - int64_t blockSize = 0; - int64_t antiquantMode = 0; - bool softmaxLseFlag = false; - int64_t keyAntiquantMode = 0; + int64_t preTokens = 65535; + int64_t nextTokens = 65535; + char layout[5] = { 'B', 'S', 'N', 'D', 0 }; + int64_t sparseMode = 0; + int64_t innerPrecise = (src0->ne[1] == 1) ? 0 : 2; + int64_t blockSize = 0; + int64_t antiquantMode = 0; + bool softmaxLseFlag = false; + int64_t keyAntiquantMode = 0; int64_t valueAntiquantMode = 0; GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - aclTensor * fa_dst_tensor = nullptr; - aclTensor * acl_dst_tensor = nullptr; + aclTensor * fa_dst_tensor = nullptr; + aclTensor * acl_dst_tensor = nullptr; ggml_cann_pool_alloc out_f16_allocator(ctx.pool()); if (dst->type == GGML_TYPE_F32) { - void* out_f16_buffer = out_f16_allocator.alloc( - ggml_nelements(dst) * faElemSize); + void * out_f16_buffer = out_f16_allocator.alloc(ggml_nelements(dst) * faElemSize); - int64_t* out_f16_ne = src0_bsnd_ne; - size_t out_f16_nb[GGML_MAX_DIMS]; + int64_t * out_f16_ne = src0_bsnd_ne; + size_t out_f16_nb[GGML_MAX_DIMS]; out_f16_nb[0] = faElemSize; - for(int i = 1; i < GGML_MAX_DIMS; ++i){ + for (int i = 1; i < GGML_MAX_DIMS; ++i) { out_f16_nb[i] = out_f16_nb[i - 1] * out_f16_ne[i - 1]; } - fa_dst_tensor = ggml_cann_create_tensor( - out_f16_buffer, faDataType, faElemSize, - out_f16_ne, out_f16_nb, GGML_MAX_DIMS - ); - } - else { + fa_dst_tensor = + ggml_cann_create_tensor(out_f16_buffer, faDataType, faElemSize, out_f16_ne, out_f16_nb, GGML_MAX_DIMS); + } else { fa_dst_tensor = ggml_cann_create_tensor(dst); } - GGML_CANN_CALL_ACLNN_OP(ctx, FusedInferAttentionScoreV2, - acl_q_tensor, acl_k_tensor_list, acl_v_tensor_list, // q, k, v - bcast_pse_tensor, nullptr, // pse, mask - nullptr, nullptr, // actSeqLen, actSeqLenkv - nullptr, nullptr, // deqScale1, quantScale1 - nullptr, nullptr, nullptr, // deqScale2, quantScale2, quantOffset2 - nullptr, nullptr, // antiquantScale, antiquantOffset - nullptr, // blockTable - nullptr, nullptr, // qPadSize, kvPadSize - nullptr, nullptr, // kAntiquantScale, kAntiQuantOffset - nullptr, nullptr, // vAntiquantScale, vAntiQuantOffset - nullptr, nullptr, nullptr, // kSharedPrefix, vSharedPrefix, actSharedLen - numHeads, scaleValue, // heads, scaleValue - preTokens, nextTokens, 
// preTokens, nextTokens - layout, // inputLayout - numKeyValueHeads, // numKVHeads - sparseMode, innerPrecise, // sparseMode, innerPrecise - blockSize, antiquantMode, // blockSize, antiquantMode - softmaxLseFlag, // softmaxLseFlag - keyAntiquantMode, valueAntiquantMode, // keyAntiqMode, valueAntiqMode - fa_dst_tensor, // attentionOut - nullptr // softmaxLse + GGML_CANN_CALL_ACLNN_OP(ctx, FusedInferAttentionScoreV2, acl_q_tensor, acl_k_tensor_list, + acl_v_tensor_list, // q, k, v + bcast_pse_tensor, nullptr, // pse, mask + nullptr, nullptr, // actSeqLen, actSeqLenkv + nullptr, nullptr, // deqScale1, quantScale1 + nullptr, nullptr, nullptr, // deqScale2, quantScale2, quantOffset2 + nullptr, nullptr, // antiquantScale, antiquantOffset + nullptr, // blockTable + nullptr, nullptr, // qPadSize, kvPadSize + nullptr, nullptr, // kAntiquantScale, kAntiQuantOffset + nullptr, nullptr, // vAntiquantScale, vAntiQuantOffset + nullptr, nullptr, nullptr, // kSharedPrefix, vSharedPrefix, actSharedLen + numHeads, scaleValue, // heads, scaleValue + preTokens, nextTokens, // preTokens, nextTokens + layout, // inputLayout + numKeyValueHeads, // numKVHeads + sparseMode, innerPrecise, // sparseMode, innerPrecise + blockSize, antiquantMode, // blockSize, antiquantMode + softmaxLseFlag, // softmaxLseFlag + keyAntiquantMode, valueAntiquantMode, // keyAntiqMode, valueAntiqMode + fa_dst_tensor, // attentionOut + nullptr // softmaxLse ); if (dst->type == GGML_TYPE_F32) { // Step 6: post-processing, permute and cast to f32 - aclTensor* acl_dst_tensor = ggml_cann_create_tensor(dst); + aclTensor * acl_dst_tensor = ggml_cann_create_tensor(dst); aclnn_cast(ctx, fa_dst_tensor, acl_dst_tensor, ggml_cann_type_mapping(dst->type)); } - ggml_cann_release_resources(ctx, acl_src0_f16_tensor, - acl_k_tensor_list, - acl_v_tensor_list, - fa_dst_tensor, - acl_dst_tensor, - bcast_pse_tensor); + ggml_cann_release_resources(ctx, acl_src0_f16_tensor, acl_k_tensor_list, acl_v_tensor_list, fa_dst_tensor, + acl_dst_tensor, bcast_pse_tensor); } else { GGML_ABORT("Function is not implemented."); diff --git a/ggml/src/ggml-cann/aclnn_ops.h b/ggml/src/ggml-cann/aclnn_ops.h old mode 100755 new mode 100644 index 5c510cc9932e8..ec7455af88cd5 --- a/ggml/src/ggml-cann/aclnn_ops.h +++ b/ggml/src/ggml-cann/aclnn_ops.h @@ -62,7 +62,7 @@ * @param dst The ggml tensor representing the destination, which op is * GGML_OP_REPEAT and specifies the desired dimensions. */ -void ggml_cann_repeat(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_repeat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the Leaky ReLU activation function to a tensor using the CANN @@ -82,7 +82,7 @@ void ggml_cann_repeat(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result of the Leaky ReLU * activation is stored, which op is `GGML_OP_LEAKY_RELU` */ -void ggml_cann_leaky_relu(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_leaky_relu(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Concatenates multiple tensors along a specified dimension using the @@ -97,7 +97,7 @@ void ggml_cann_leaky_relu(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @attention tensorList length should be 2 and the dimension using for concat * default to 1. 
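// The f16 staging buffers in the flash-attention hunk above (src0_f16_nb, out_f16_nb, and the
// select_export_nb strides in the MoE path) all derive their byte strides with the same
// contiguous-layout recurrence. A tiny standalone illustration of that convention:
#include <stddef.h>
#include <stdint.h>

// GGML-style byte strides for a contiguous tensor: nb[0] is the element size and
// nb[i] = nb[i - 1] * ne[i - 1]; the byte offset of (i0, i1, i2, i3) is then sum_k(ik * nb[k]).
static void contiguous_strides(const int64_t ne[4], size_t elem_size, size_t nb[4]) {
    nb[0] = elem_size;
    for (int i = 1; i < 4; ++i) {
        nb[i] = nb[i - 1] * (size_t) ne[i - 1];
    }
}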
*/ -void ggml_cann_concat(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_concat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Generates a sequence of evenly spaced values within a specified @@ -113,7 +113,7 @@ void ggml_cann_concat(ggml_backend_cann_context& ctx, ggml_tensor* dst); * `start`, 'stop' and 'step' are in dst->op_params and dst->op is * `GGML_OP_ARANGE`. */ -void ggml_cann_arange(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_arange(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies a clamp operation to the elements of a ggml tensor using the @@ -131,7 +131,7 @@ void ggml_cann_arange(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the clamped values will be stored. * dst->op is `GGML_OP_CLAMP`, `min` and `max` value is in dst->params. */ -void ggml_cann_clamp(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_clamp(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Scales the elements of a ggml tensor by a constant factor using the @@ -148,7 +148,7 @@ void ggml_cann_clamp(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the scaled values will be stored. * dst->op is `GGML_OP_SCALE` and `scale` value is in dst->params. */ -void ggml_cann_scale(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_scale(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Sorts the elements of a ggml tensor and returns the indices that @@ -163,7 +163,7 @@ void ggml_cann_scale(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the sorted indices will be stored. * dst->op is `GGML_OP_ARGSORT`. */ -void ggml_cann_argsort(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_argsort(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Layer Normalization for a ggml tensor using the CANN @@ -185,7 +185,7 @@ void ggml_cann_argsort(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the normalized values will be stored. * @attention `Var` defaults to dst->ne[0]. */ -void ggml_cann_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Group Normalization for a ggml tensor using the CANN @@ -209,7 +209,7 @@ void ggml_cann_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); * * @attention eps defaults to 1e-6f. */ -void ggml_cann_group_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the accumulation of tensors using the CANN backend. @@ -228,7 +228,7 @@ void ggml_cann_group_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the accumulated values will be stored. * `inplace` is in dst->params, and dst->op is `GGML_OP_ACC`. */ -void ggml_cann_acc(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_acc(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the sum of elements along the last dimension of a ggml tensor @@ -244,7 +244,7 @@ void ggml_cann_acc(ggml_backend_cann_context& ctx, ggml_tensor* dst); * * @attention `reduce_dims` defaults to 3, which means the last dimension. 
*/ -void ggml_cann_sum_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_sum_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the sum of elements in a ggml tensor. @@ -258,7 +258,7 @@ void ggml_cann_sum_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); * */ -void ggml_cann_sum(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_sum(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Upsamples a ggml tensor using nearest neighbor interpolation using @@ -274,8 +274,7 @@ void ggml_cann_sum(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the upsampled values will be stored. * dst->op is `GGML_OP_UPSCALE`. */ -void ggml_cann_upsample_nearest2d(ggml_backend_cann_context& ctx, - ggml_tensor* dst); +void ggml_cann_upsample_nearest2d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Pads a ggml tensor to match the dimensions of the destination tensor @@ -290,7 +289,7 @@ void ggml_cann_upsample_nearest2d(ggml_backend_cann_context& ctx, * @param dst The destination tensor, which specifies the target dimensions for * padding. dst->op is `GGML_OP_PAD`. */ -void ggml_cann_pad(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_pad(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Executes a 2D pooling operation on a ggml tensor using the CANN @@ -307,7 +306,7 @@ void ggml_cann_pad(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor on which the pooling operation is to be * performed. dst->op is `GGML_OP_POOL_2D`. */ -void ggml_cann_pool2d(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_pool2d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Duplicates a ggml tensor using the CANN backend. @@ -326,7 +325,7 @@ void ggml_cann_pool2d(ggml_backend_cann_context& ctx, ggml_tensor* dst); * different shape and dst is no-contiguous. * @note: This func need to simplify. */ -void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_dup(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the Root Mean Square (RMS) normalization of a ggml tensor @@ -348,7 +347,7 @@ void ggml_cann_dup(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the normalized values will be stored. * dst->op is `GGML_OP_RMS_NORM`. */ -void ggml_cann_rms_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_rms_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies a diagonal mask to the tensor with a specified value. @@ -363,7 +362,7 @@ void ggml_cann_rms_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst); * `GGML_OP_DIAG_MASK` * @param value The value to use for masking. */ -void ggml_cann_diag_mask(ggml_backend_cann_context& ctx, ggml_tensor* dst, float value); +void ggml_cann_diag_mask(ggml_backend_cann_context & ctx, ggml_tensor * dst, float value); /** * @brief Performs an image-to-column transformation on the input tensor. @@ -378,7 +377,7 @@ void ggml_cann_diag_mask(ggml_backend_cann_context& ctx, ggml_tensor* dst, float * @param dst The destination tensor that stores the result of the operation. * dst->op is `GGML_OP_IM2COL`. */ -void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_im2col(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes time step embeddings using sine and cosine functions. 
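// The timestep embedding op declared just below maps to the usual sinusoidal timestep
// embedding. A standalone reference sketch of that formula; the half-cos/half-sin layout and
// the zero padding for odd dims are assumptions based on the CPU implementation:
#include <math.h>

static void timestep_embedding_ref(float t, int dim, int max_period, float * out) {
    const int half = dim / 2;
    for (int j = 0; j < half; ++j) {
        const float freq = expf(-logf((float) max_period) * (float) j / (float) half);
        out[j]        = cosf(t * freq);   // first half: cosine
        out[j + half] = sinf(t * freq);   // second half: sine
    }
    if (dim % 2 != 0) {
        out[dim - 1] = 0.0f;              // pad the odd trailing element
    }
}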
@@ -392,10 +391,10 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result of the embedding operation * will be stored. dst->op is `GGML_OP_TIMESTEP_EMBEDDING`. */ -void ggml_cann_timestep_embedding(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_timestep_embedding(ggml_backend_cann_context & ctx, ggml_tensor * dst); // @see ggml_cann_dup. -void ggml_cann_cpy(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_cpy(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the softmax activation with optional masking. @@ -417,7 +416,7 @@ void ggml_cann_cpy(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result will be stored. dst->op is * `GGML_OP_SOFTMAX`. */ -void ggml_cann_softmax(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_softmax(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Extracts specific rows from a tensor based on indices. @@ -429,7 +428,7 @@ void ggml_cann_softmax(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the extracted rows will be stored. */ -void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_get_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Writes specific rows into a tensor at positions specified by indices. @@ -441,7 +440,7 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param ctx The backend CANN context for executing operations. * @param dst The destination tensor where the specified rows will be updated. */ -void ggml_cann_set_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_set_rows(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Executes matrix multiplication for the given tensor. @@ -454,7 +453,7 @@ void ggml_cann_set_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor for storing the result of the matrix * multiplication. dst->op is `GGML_OP_MUL_MAT`. */ -void ggml_cann_mul_mat(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_mul_mat(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies Rotary Positional Embedding (RoPE) to the input tensor. @@ -477,7 +476,7 @@ void ggml_cann_mul_mat(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @note The function currently does not support cases where the freq_scale is * not equal 1. */ -void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_rope(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the index of the maximum value along the specified dimension @@ -492,7 +491,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the indices of the maximum values will * be stored. dst->op is `GGML_OP_ARGMAX`. */ -void ggml_cann_argmax(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_argmax(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Adds two tensors element-wise and stores the result in a destination @@ -509,8 +508,10 @@ void ggml_cann_argmax(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param acl_src1 The second source tensor. * @param acl_dst The destination tensor where the result will be stored. 
*/ -void aclnn_add(ggml_backend_cann_context& ctx, aclTensor* acl_src0, - aclTensor* acl_src1, aclTensor* acl_dst = nullptr); +void aclnn_add(ggml_backend_cann_context & ctx, + aclTensor * acl_src0, + aclTensor * acl_src1, + aclTensor * acl_dst = nullptr); /** * @brief Sub two tensors element-wise and stores the result in a destination @@ -527,8 +528,10 @@ void aclnn_add(ggml_backend_cann_context& ctx, aclTensor* acl_src0, * @param acl_src1 The second source tensor. * @param acl_dst The destination tensor where the result will be stored. */ -void aclnn_sub(ggml_backend_cann_context& ctx, aclTensor* acl_src0, - aclTensor* acl_src1, aclTensor* acl_dst = nullptr); +void aclnn_sub(ggml_backend_cann_context & ctx, + aclTensor * acl_src0, + aclTensor * acl_src1, + aclTensor * acl_dst = nullptr); /** * @brief Performs element-wise multiplication of two tensors and stores the @@ -546,8 +549,10 @@ void aclnn_sub(ggml_backend_cann_context& ctx, aclTensor* acl_src0, * @param acl_other The second tensor for element-wise multiplication. * @param acl_dst The destination tensor where the result will be stored. */ -void aclnn_mul(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_other, aclTensor* acl_dst = nullptr); +void aclnn_mul(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_other, + aclTensor * acl_dst = nullptr); /** * @brief Matrix division, optionally in-place. @@ -567,8 +572,10 @@ void aclnn_mul(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param inplace Flag indicating whether to perform the operation in-place on * `acl_src`. */ -void aclnn_div(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_other, aclTensor* acl_dst = nullptr); +void aclnn_div(ggml_backend_cann_context & ctx, + aclTensor * acl_src, + aclTensor * acl_other, + aclTensor * acl_dst = nullptr); /** * @brief Applies element-wise cosine function to the elements of a tensor. @@ -584,8 +591,7 @@ void aclnn_div(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param acl_dst The destination tensor where the cosine results will be * stored. */ -void aclnn_cos(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst); +void aclnn_cos(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst); /** * @brief Applies element-wise sine function to the elements of a tensor. @@ -602,8 +608,7 @@ void aclnn_cos(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param acl_src The source tensor on which the sine function will be applied. * @param acl_dst The destination tensor where the sine results will be stored. */ -void aclnn_sin(ggml_backend_cann_context& ctx, aclTensor* acl_src, - aclTensor* acl_dst); +void aclnn_sin(ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst); /** * @brief Prepares broadcast-compatible ACL tensors for two input tensors and one @@ -621,8 +626,12 @@ void aclnn_sin(ggml_backend_cann_context& ctx, aclTensor* acl_src, * @param acl_src1 Output pointer to the created ACL tensor corresponding to src1. * @param acl_dst Output pointer to the created ACL tensor corresponding to dst. 
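// The element-wise helpers above (aclnn_add/sub/mul/div) take an optional destination; judging
// from the defaulted nullptr argument and the "optionally in-place" notes, omitting it performs
// the operation in place on the first operand. A usage sketch, assuming a backend context `ctx`
// and a dst node with two sources, as elsewhere in this backend:
aclTensor * acl_a = ggml_cann_create_tensor(dst->src[0]);
aclTensor * acl_b = ggml_cann_create_tensor(dst->src[1]);
aclTensor * acl_d = ggml_cann_create_tensor(dst);

aclnn_mul(ctx, acl_a, acl_b, acl_d);  // out of place: dst = src0 * src1
aclnn_mul(ctx, acl_a, acl_b);         // in place: updates the first operand (acl_dst left as nullptr)

ggml_cann_release_resources(ctx, acl_a, acl_b, acl_d);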
*/ -void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, - aclTensor ** acl_src0, aclTensor ** acl_src1, aclTensor ** acl_dst); +void bcast_shape(ggml_tensor * src0, + ggml_tensor * src1, + ggml_tensor * dst, + aclTensor ** acl_src0, + aclTensor ** acl_src1, + aclTensor ** acl_dst); /** * @brief Computes the 1D transposed convolution (deconvolution) of a ggml @@ -637,7 +646,7 @@ void bcast_shape(ggml_tensor * src0, ggml_tensor * src1, ggml_tensor * dst, * @param dst The destination tensor where the transposed convolution result * will be stored. dst->op is `GGML_OP_CONV_TRANSPOSE_1D`. */ -void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_conv_transpose_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the ELU (Exponential Linear Unit) activation to a ggml tensor @@ -662,7 +671,7 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds * @param dst The destination tensor where the ELU-activated result will be stored. * dst->op is expected to be `GGML_OP_ELU`. */ -void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_elu(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Computes the mean of a ggml tensor element-wise using the CANN backend. @@ -677,7 +686,7 @@ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the mean result will be stored. * dst->op is expected to be `GGML_OP_MEAN`. */ -void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_mean(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies 1D reflect padding to a ggml tensor using the CANN backend. @@ -692,7 +701,7 @@ void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the padded result will be stored. * dst->op is expected to be `GGML_OP_PAD_REFLECT_1D`. */ -void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Counts the number of equal elements in two ggml tensors using the CANN backend. @@ -708,7 +717,7 @@ void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_COUNT_EQUAL`. */ -void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_count_equal(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Applies the Step activation function to a ggml tensor using the CANN backend. @@ -723,7 +732,7 @@ void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_STEP`. */ -void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_step(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Performs the Flash Attention extended operator using the CANN backend. @@ -738,59 +747,46 @@ void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst); * @param dst The destination tensor where the result will be stored. * dst->op is expected to be `GGML_OP_FLASH_ATTN_EXT`. 
*/ -void ggml_cann_flash_attn_ext(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_flash_attn_ext(ggml_backend_cann_context & ctx, ggml_tensor * dst); /* * @brief A generic wrapper for ACL resources with custom deleter support. */ -using any_acl_resource = std::unique_ptr>; +using any_acl_resource = std::unique_ptr>; /** * @brief Trait structure used to define how to destroy a given ACL resource type. * * @tparam T ACL resource type. */ -template -struct acl_resource_traits; +template struct acl_resource_traits; /** * @brief Specialization for aclTensor, defines how to destroy an aclTensor resource. */ -template<> -struct acl_resource_traits { - static void destroy(void* p) { - ACL_CHECK(aclDestroyTensor(static_cast(p))); - } +template <> struct acl_resource_traits { + static void destroy(void * p) { ACL_CHECK(aclDestroyTensor(static_cast(p))); } }; /** * @brief Specialization for aclIntArray, defines how to destroy an aclIntArray resource. */ -template<> -struct acl_resource_traits { - static void destroy(void* p) { - ACL_CHECK(aclDestroyIntArray(static_cast(p))); - } +template <> struct acl_resource_traits { + static void destroy(void * p) { ACL_CHECK(aclDestroyIntArray(static_cast(p))); } }; /** * @brief Specialization for aclScalar, defines how to destroy an aclScalar resource. */ -template<> -struct acl_resource_traits { - static void destroy(void* p) { - ACL_CHECK(aclDestroyScalar(static_cast(p))); - } +template <> struct acl_resource_traits { + static void destroy(void * p) { ACL_CHECK(aclDestroyScalar(static_cast(p))); } }; /** * @brief Specialization for aclTensorList, defines how to destroy an aclTensorList resource. */ -template<> -struct acl_resource_traits { - static void destroy(void* p) { - ACL_CHECK(aclDestroyTensorList(static_cast(p))); - } +template <> struct acl_resource_traits { + static void destroy(void * p) { ACL_CHECK(aclDestroyTensorList(static_cast(p))); } }; /** @@ -800,14 +796,8 @@ struct acl_resource_traits { * @param ptr Raw pointer to ACL resource. * @return any_acl_resource Smart pointer that handles destruction. */ -template -any_acl_resource make_acl_resource(T* ptr) { - return any_acl_resource( - static_cast(ptr), - [](void* p) { - acl_resource_traits::destroy(p); - } - ); +template any_acl_resource make_acl_resource(T * ptr) { + return any_acl_resource(static_cast(ptr), [](void * p) { acl_resource_traits::destroy(p); }); } /** @@ -817,8 +807,7 @@ any_acl_resource make_acl_resource(T* ptr) { * @param vec Target vector to hold ACL resources. * @param args Raw pointers to ACL resources. */ -template -void register_acl_resources(std::vector& vec, Args*... args) { +template void register_acl_resources(std::vector & vec, Args *... args) { (vec.emplace_back(make_acl_resource(args)), ...); } @@ -826,39 +815,36 @@ void register_acl_resources(std::vector& vec, Args*... args) { * @brief Task class that wraps the execution of an aclnn function call. 
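// any_acl_resource / make_acl_resource above pair a type-erased unique_ptr with per-type
// destroy traits so heterogeneous ACL handles can sit in one vector. A self-contained
// illustration of the same pattern with a stand-in handle type (hypothetical, standard C++
// only; the real code specializes the trait for aclTensor, aclIntArray, aclScalar and
// aclTensorList):
#include <functional>
#include <memory>
#include <vector>

struct fake_handle { int id; };                   // stand-in for an ACL handle

template <typename T> struct resource_traits;     // how to destroy each handle type

template <> struct resource_traits<fake_handle> {
    static void destroy(void * p) { delete static_cast<fake_handle *>(p); }
};

// One owning pointer type for every resource: the deleter is type-erased.
using any_resource = std::unique_ptr<void, std::function<void(void *)>>;

template <typename T> any_resource make_resource(T * ptr) {
    return any_resource(static_cast<void *>(ptr), [](void * p) { resource_traits<T>::destroy(p); });
}

int main() {
    std::vector<any_resource> keep_alive;
    keep_alive.emplace_back(make_resource(new fake_handle{ 42 }));
    // every handle is destroyed through its trait when the vector goes out of scope
}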
*/ class aclnn_task : public cann_task { - public: - aclnn_task(aclnn_func_t aclnn_func, void * workspace_addr, - uint64_t workspace_size, aclOpExecutor * executor, - aclrtStream stream) : - aclnn_func_(aclnn_func), - workspace_addr_(workspace_addr), - workspace_size_(workspace_size), - executor_(executor), - stream_(stream) {} - virtual void run_task() override { - ACL_CHECK(aclnn_func_(workspace_addr_, workspace_size_, executor_, stream_)); - } - private: - aclnn_func_t aclnn_func_; - void * workspace_addr_; - uint64_t workspace_size_; - aclOpExecutor * executor_; - aclrtStream stream_; + public: + aclnn_task(aclnn_func_t aclnn_func, + void * workspace_addr, + uint64_t workspace_size, + aclOpExecutor * executor, + aclrtStream stream) : + aclnn_func_(aclnn_func), + workspace_addr_(workspace_addr), + workspace_size_(workspace_size), + executor_(executor), + stream_(stream) {} + + virtual void run_task() override { ACL_CHECK(aclnn_func_(workspace_addr_, workspace_size_, executor_, stream_)); } + private: + aclnn_func_t aclnn_func_; + void * workspace_addr_; + uint64_t workspace_size_; + aclOpExecutor * executor_; + aclrtStream stream_; }; /** * @brief Task class that releases ACL resources after usage. */ class release_resource_task : public cann_task { -public: - release_resource_task(std::vector&& resources){ - resource_ = std::move(resources); - } + public: + release_resource_task(std::vector && resources) { resource_ = std::move(resources); } - virtual void run_task() override { - resource_.clear(); - } -private: + virtual void run_task() override { resource_.clear(); } + private: std::vector resource_; }; @@ -866,38 +852,40 @@ class release_resource_task : public cann_task { * @brief Task class for performing asynchronous memory copy operations. */ class async_memcpy_task : public cann_task { -public: - async_memcpy_task(void* dst, const void* src, size_t size, - aclrtMemcpyKind kind, aclrtStream stream) - : dst_(dst), src_(src), size_(size), kind_(kind), stream_(stream) {} - - virtual void run_task() override { - ACL_CHECK(aclrtMemcpyAsync(dst_, size_, src_, size_, kind_, stream_)); - } -private: - void* dst_; - const void* src_; - size_t size_; + public: + async_memcpy_task(void * dst, const void * src, size_t size, aclrtMemcpyKind kind, aclrtStream stream) : + dst_(dst), + src_(src), + size_(size), + kind_(kind), + stream_(stream) {} + + virtual void run_task() override { ACL_CHECK(aclrtMemcpyAsync(dst_, size_, src_, size_, kind_, stream_)); } + private: + void * dst_; + const void * src_; + size_t size_; aclrtMemcpyKind kind_; - aclrtStream stream_; + aclrtStream stream_; }; /** * @brief Task class for performing asynchronous memory set operations. 
*/ class async_memset_task : public cann_task { - public: - async_memset_task(void* buffer, size_t size, int32_t value, aclrtStream stream) - : buffer_(buffer), size_(size), value_(value), stream_(stream) {} - - virtual void run_task() override { - ACL_CHECK(aclrtMemsetAsync(buffer_, size_, value_, size_, stream_)); - } - private: - void* buffer_; - size_t size_; - int32_t value_; - aclrtStream stream_; + public: + async_memset_task(void * buffer, size_t size, int32_t value, aclrtStream stream) : + buffer_(buffer), + size_(size), + value_(value), + stream_(stream) {} + + virtual void run_task() override { ACL_CHECK(aclrtMemsetAsync(buffer_, size_, value_, size_, stream_)); } + private: + void * buffer_; + size_t size_; + int32_t value_; + aclrtStream stream_; }; /** @@ -918,25 +906,24 @@ class async_memset_task : public cann_task { * same stream are executed in queue order. */ -#define GGML_CANN_CALL_ACLNN_OP(CTX, OP_NAME, ...) \ - do { \ - uint64_t workspaceSize = 0; \ - aclOpExecutor * executor; \ - void * workspaceAddr = nullptr; \ - ACL_CHECK(aclnn##OP_NAME##GetWorkspaceSize(__VA_ARGS__, &workspaceSize, &executor));\ - /* workspace should alloced in main thread to keep malloc order when using vmm. */ \ - if (workspaceSize > 0) { \ - ggml_cann_pool_alloc workspace_allocator(CTX.pool(), workspaceSize); \ - workspaceAddr = workspace_allocator.get(); \ - } \ - if (CTX.async_mode) { \ - auto task = \ - std::make_unique(aclnn##OP_NAME, workspaceAddr, workspaceSize, \ - executor, CTX.stream()); \ - CTX.task_queue.submit_task(std::move(task)); \ - } else { \ - ACL_CHECK(aclnn##OP_NAME(workspaceAddr, workspaceSize, executor, CTX.stream()));\ - } \ +#define GGML_CANN_CALL_ACLNN_OP(CTX, OP_NAME, ...) \ + do { \ + uint64_t workspaceSize = 0; \ + aclOpExecutor * executor; \ + void * workspaceAddr = nullptr; \ + ACL_CHECK(aclnn##OP_NAME##GetWorkspaceSize(__VA_ARGS__, &workspaceSize, &executor)); \ + /* workspace should alloced in main thread to keep malloc order when using vmm. */ \ + if (workspaceSize > 0) { \ + ggml_cann_pool_alloc workspace_allocator(CTX.pool(), workspaceSize); \ + workspaceAddr = workspace_allocator.get(); \ + } \ + if (CTX.async_mode) { \ + auto task = \ + std::make_unique(aclnn##OP_NAME, workspaceAddr, workspaceSize, executor, CTX.stream()); \ + CTX.task_queue.submit_task(std::move(task)); \ + } else { \ + ACL_CHECK(aclnn##OP_NAME(workspaceAddr, workspaceSize, executor, CTX.stream())); \ + } \ } while (0) /** @@ -947,11 +934,10 @@ class async_memset_task : public cann_task { * @param ctx Backend context which manages task submission and async mode. * @param args Pointers to ACL resources to be released. */ -template -void ggml_cann_release_resources(ggml_backend_cann_context & ctx, Args &&... args) { +template void ggml_cann_release_resources(ggml_backend_cann_context & ctx, Args &&... args) { std::vector resources; register_acl_resources(resources, std::forward(args)...); - if(ctx.async_mode) { + if (ctx.async_mode) { auto task = std::make_unique(std::move(resources)); ctx.task_queue.submit_task(std::move(task)); } @@ -966,8 +952,11 @@ void ggml_cann_release_resources(ggml_backend_cann_context & ctx, Args &&... arg * @param len Size of memory to copy (in bytes). * @param kind Type of memory copy (host-to-device, device-to-host, etc). 
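// Roughly what GGML_CANN_CALL_ACLNN_OP(ctx, Mean, acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst)
// from ggml_cann_mean() becomes once the macro above is expanded (a sketch of the preprocessor
// output, lightly reformatted):
do {
    uint64_t        workspaceSize = 0;
    aclOpExecutor * executor;
    void *          workspaceAddr = nullptr;
    ACL_CHECK(aclnnMeanGetWorkspaceSize(acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst, &workspaceSize, &executor));
    // the workspace is allocated on the main thread to keep the malloc order stable when using VMM
    if (workspaceSize > 0) {
        ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize);
        workspaceAddr = workspace_allocator.get();
    }
    if (ctx.async_mode) {
        auto task = std::make_unique<aclnn_task>(aclnnMean, workspaceAddr, workspaceSize, executor, ctx.stream());
        ctx.task_queue.submit_task(std::move(task));
    } else {
        ACL_CHECK(aclnnMean(workspaceAddr, workspaceSize, executor, ctx.stream()));
    }
} while (0);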
*/ -inline void ggml_cann_async_memcpy(ggml_backend_cann_context & ctx, void * dst, - const void * src, size_t len, aclrtMemcpyKind kind) { +inline void ggml_cann_async_memcpy(ggml_backend_cann_context & ctx, + void * dst, + const void * src, + size_t len, + aclrtMemcpyKind kind) { if (ctx.async_mode) { auto task = std::make_unique(dst, const_cast(src), len, kind, ctx.stream()); ctx.task_queue.submit_task(std::move(task)); @@ -976,8 +965,11 @@ inline void ggml_cann_async_memcpy(ggml_backend_cann_context & ctx, void * dst, } } -inline void ggml_cann_async_memcpy(ggml_backend_cann_context * ctx, void * dst, - const void * src, size_t len, aclrtMemcpyKind kind) { +inline void ggml_cann_async_memcpy(ggml_backend_cann_context * ctx, + void * dst, + const void * src, + size_t len, + aclrtMemcpyKind kind) { if (ctx->async_mode) { auto task = std::make_unique(dst, const_cast(src), len, kind, ctx->stream()); ctx->task_queue.submit_task(std::move(task)); @@ -994,8 +986,7 @@ inline void ggml_cann_async_memcpy(ggml_backend_cann_context * ctx, void * dst, * @param size Size of the memory buffer (in bytes). * @param value Value to set in the buffer. */ -inline void ggml_cann_async_memset(ggml_backend_cann_context & ctx, void * buffer, - size_t size, int value) { +inline void ggml_cann_async_memset(ggml_backend_cann_context & ctx, void * buffer, size_t size, int value) { if (ctx.async_mode) { auto task = std::make_unique(buffer, size, value, ctx.stream()); ctx.task_queue.submit_task(std::move(task)); @@ -1029,7 +1020,7 @@ inline void ggml_cann_async_memset(ggml_backend_cann_context & ctx, void * buffe * @param dst The destination tensor where the expert-weighted token outputs are stored. * Expected to be of shape [M, K, N, 1]. */ -void ggml_cann_mul_mat_id(ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_mul_mat_id(ggml_backend_cann_context & ctx, ggml_tensor * dst); /** * @brief Check whether a tensor is a weight tensor for matrix multiplication. @@ -1041,20 +1032,14 @@ void ggml_cann_mul_mat_id(ggml_backend_cann_context& ctx, ggml_tensor* dst); * * @param tensor Pointer to the target ggml_tensor object (const-qualified). */ -static bool is_matmul_weight(const ggml_tensor* tensor) { - std::string name = ggml_get_name(tensor); - static const std::unordered_set weight_suffixes{ - "output.weight", - "attn_q.weight", - "attn_k.weight", - "attn_v.weight", - "attn_output.weight", - "ffn_gate.weight", - "ffn_up.weight", - "ffn_down.weight" - }; - - for (const auto& suffix : weight_suffixes) { +static bool is_matmul_weight(const ggml_tensor * tensor) { + std::string name = ggml_get_name(tensor); + static const std::unordered_set weight_suffixes{ "output.weight", "attn_q.weight", + "attn_k.weight", "attn_v.weight", + "attn_output.weight", "ffn_gate.weight", + "ffn_up.weight", "ffn_down.weight" }; + + for (const auto & suffix : weight_suffixes) { if (name.find(suffix) != std::string::npos) { return true; } @@ -1078,14 +1063,13 @@ static bool is_matmul_weight(const ggml_tensor* tensor) { * @param ctx The CANN backend context used to manage execution and resources. * @param dst The destination tensor. 
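// is_matmul_weight() above is a plain substring match of ggml_get_name(tensor) against a fixed
// set of weight suffixes, so with the usual llama.cpp naming convention (example names only):
//   "blk.0.attn_q.weight"    -> true   (contains "attn_q.weight")
//   "blk.0.ffn_down.weight"  -> true   (contains "ffn_down.weight")
//   "blk.0.attn_norm.weight" -> false  (no suffix in the set matches)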
*/ -template -void ggml_cann_binary_op(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src0 = dst->src[0]; - ggml_tensor* src1 = dst->src[1]; +template void ggml_cann_binary_op(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; - aclTensor* acl_src0; - aclTensor* acl_src1; - aclTensor* acl_dst; + aclTensor * acl_src0; + aclTensor * acl_src1; + aclTensor * acl_dst; // Need bcast bcast_shape(src0, src1, dst, &acl_src0, &acl_src1, &acl_dst); @@ -1094,7 +1078,6 @@ void ggml_cann_binary_op(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ggml_cann_release_resources(ctx, acl_src0, acl_src1, acl_dst); } - /** * @brief Applies a unary operation to an input tensor using the CANN backend. * @@ -1107,12 +1090,12 @@ void ggml_cann_binary_op(ggml_backend_cann_context& ctx, ggml_tensor* dst) { * @param ctx The CANN backend context for managing resources and execution. * @param dst The destination tensor. Its src[0] is treated as the input tensor. */ -template - void ggml_cann_op_unary(ggml_backend_cann_context& ctx, ggml_tensor* dst) { - ggml_tensor* src = dst->src[0]; +template +void ggml_cann_op_unary(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; - aclTensor* acl_src = ggml_cann_create_tensor(src); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); unary_op(ctx, acl_src, acl_dst); ggml_cann_release_resources(ctx, acl_src, acl_dst); @@ -1138,9 +1121,9 @@ template * * @see GGML_CANN_CALL_OP_UNARY */ -void ggml_cann_op_unary( - std::function unary_op, - ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_op_unary(std::function unary_op, + ggml_backend_cann_context & ctx, + ggml_tensor * dst); /** * @brief Applies a gated (GLU-style) unary operation using the CANN backend. @@ -1172,9 +1155,9 @@ void ggml_cann_op_unary( * * @see GGML_CANN_CALL_OP_UNARY_GATED */ -void ggml_cann_op_unary_gated( - std::function unary_op, - ggml_backend_cann_context& ctx, ggml_tensor* dst); +void ggml_cann_op_unary_gated(std::function unary_op, + ggml_backend_cann_context & ctx, + ggml_tensor * dst); /** * @brief Helper macro to call a unary ACL operator via ggml_cann_op_unary. @@ -1197,16 +1180,13 @@ void ggml_cann_op_unary_gated( * @see ggml_cann_op_unary * @see GGML_CANN_CALL_ACLNN_OP */ -#define GGML_CANN_CALL_OP_UNARY(OP_NAME) \ - do { \ - auto lambda = [](ggml_backend_cann_context& ctx, \ - aclTensor* acl_src, \ - aclTensor* acl_dst) { \ - GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ - }; \ - ggml_cann_op_unary(lambda, ctx, dst); \ - } \ - while (0) +#define GGML_CANN_CALL_OP_UNARY(OP_NAME) \ + do { \ + auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { \ + GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ + }; \ + ggml_cann_op_unary(lambda, ctx, dst); \ + } while (0) /** * @brief Helper macro to call a gated unary ACL operator via ggml_cann_op_unary_gated. 
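// A hypothetical sketch of how these helpers are meant to be wired into the backend's op
// dispatch (the real dispatch lives in ggml-cann.cpp and may differ in detail; GGML_OP_MUL,
// GGML_OP_UNARY and GGML_UNARY_OP_EXP are real ggml enums, while the use of aclnnExp for the
// unary case is an assumption):
switch (dst->op) {
    case GGML_OP_MUL:
        ggml_cann_binary_op<aclnn_mul>(ctx, dst);  // broadcasts src0/src1 via bcast_shape(), then multiplies
        break;
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(dst)) {
            case GGML_UNARY_OP_EXP:
                GGML_CANN_CALL_OP_UNARY(Exp);      // assumes an aclnnExp(src, out) operator with this arity
                break;
            default:
                break;
        }
        break;
    default:
        break;
}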
@@ -1229,15 +1209,12 @@ void ggml_cann_op_unary_gated( * @see ggml_cann_op_unary_gated * @see GGML_CANN_CALL_ACLNN_OP */ -#define GGML_CANN_CALL_OP_UNARY_GATED(OP_NAME) \ - do { \ - auto lambda = [](ggml_backend_cann_context& ctx, \ - aclTensor* acl_src, \ - aclTensor* acl_dst) { \ - GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ - }; \ - ggml_cann_op_unary_gated(lambda, ctx, dst); \ - } \ - while (0) +#define GGML_CANN_CALL_OP_UNARY_GATED(OP_NAME) \ + do { \ + auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { \ + GGML_CANN_CALL_ACLNN_OP(ctx, OP_NAME, acl_src, acl_dst); \ + }; \ + ggml_cann_op_unary_gated(lambda, ctx, dst); \ + } while (0) #endif // CANN_ACLNN_OPS diff --git a/ggml/src/ggml-cann/common.h b/ggml/src/ggml-cann/common.h old mode 100755 new mode 100644 index debbcadc1e4c5..e87dbcf329ff2 --- a/ggml/src/ggml-cann/common.h +++ b/ggml/src/ggml-cann/common.h @@ -44,7 +44,7 @@ #include "../include/ggml.h" #include "../ggml-impl.h" -#define MATRIX_ROW_PADDING 512 +#define MATRIX_ROW_PADDING 512 #define GGML_CANN_MAX_STREAMS 8 /** @@ -56,8 +56,7 @@ * @param line The line number at which the error occurred. * @param msg The error message. */ -[[noreturn]] void ggml_cann_error(const char* stmt, const char* func, - const char* file, int line, const char* msg); +[[noreturn]] void ggml_cann_error(const char * stmt, const char * func, const char * file, int line, const char * msg); /** * @brief Checks the result of a CANN function call and invokes the error @@ -89,25 +88,24 @@ struct ggml_cann_device_info { * @brief Information about a single CANN device. */ struct cann_device_info { - int cc; /**< Compute capability. */ + int cc; /**< Compute capability. */ size_t smpb; /**< Maximum shared memory per block. */ - bool vmm; /**< Virtual memory support. */ + bool vmm; /**< Virtual memory support. */ size_t vmm_granularity; /**< Granularity of virtual memory. */ size_t total_vram; /**< Total video RAM available on the device. */ }; - cann_device_info devices[GGML_CANN_MAX_DEVICES] = - {}; /**< Array of CANN device information. */ + cann_device_info devices[GGML_CANN_MAX_DEVICES] = {}; /**< Array of CANN device information. */ }; -const ggml_cann_device_info& ggml_cann_info(); +const ggml_cann_device_info & ggml_cann_info(); -void ggml_cann_set_device(int32_t device); +void ggml_cann_set_device(int32_t device); int32_t ggml_cann_get_device(); -std::optional get_env(const std::string& name); -bool parse_bool(const std::string& value); -int parse_integer(const std::string& value); +std::optional get_env(const std::string & name); +bool parse_bool(const std::string & value); +int parse_integer(const std::string & value); /** * @brief Abstract base class for memory pools used by CANN. @@ -126,7 +124,7 @@ struct ggml_cann_pool { * will be stored. * @return Pointer to the allocated memory block. */ - virtual void* alloc(size_t size, size_t* actual_size) = 0; + virtual void * alloc(size_t size, size_t * actual_size) = 0; /** * @brief Frees a previously allocated memory block. @@ -136,16 +134,16 @@ struct ggml_cann_pool { * @note Note that all CANN opertors are running async. Make sure memory is * still avaiable before this operator finished. */ - virtual void free(void* ptr, size_t size) = 0; + virtual void free(void * ptr, size_t size) = 0; }; /** * @brief RAII wrapper for managing memory allocations from a CANN memory pool. */ struct ggml_cann_pool_alloc { - ggml_cann_pool* pool = nullptr; /**< Pointer to the memory pool. 
*/ - void* ptr = nullptr; /**< Pointer to the allocated memory block. */ - size_t actual_size = 0; /**< Actual size of the allocated memory block. */ + ggml_cann_pool * pool = nullptr; /**< Pointer to the memory pool. */ + void * ptr = nullptr; /**< Pointer to the allocated memory block. */ + size_t actual_size = 0; /**< Actual size of the allocated memory block. */ /** * @brief Default constructor. @@ -156,16 +154,14 @@ struct ggml_cann_pool_alloc { * @brief Constructor that initializes the memory pool. * @param pool Reference to the memory pool. */ - explicit ggml_cann_pool_alloc(ggml_cann_pool& pool) : pool(&pool) {} + explicit ggml_cann_pool_alloc(ggml_cann_pool & pool) : pool(&pool) {} /** * @brief Constructor that initializes the memory pool and allocates memory. * @param pool Reference to the memory pool. * @param size Size of the memory block to allocate. */ - ggml_cann_pool_alloc(ggml_cann_pool& pool, size_t size) : pool(&pool) { - alloc(size); - } + ggml_cann_pool_alloc(ggml_cann_pool & pool, size_t size) : pool(&pool) { alloc(size); } /** * @brief Destructor that frees the allocated memory block. @@ -181,7 +177,7 @@ struct ggml_cann_pool_alloc { * @param size Size of the memory block to allocate. * @return Pointer to the allocated memory block. */ - void* alloc(size_t size) { + void * alloc(size_t size) { GGML_ASSERT(pool != nullptr); GGML_ASSERT(ptr == nullptr); ptr = pool->alloc(size, &this->actual_size); @@ -194,7 +190,7 @@ struct ggml_cann_pool_alloc { * @param size Size of the memory block to allocate. * @return Pointer to the allocated memory block. */ - void* alloc(ggml_cann_pool& pool, size_t size) { + void * alloc(ggml_cann_pool & pool, size_t size) { this->pool = &pool; return alloc(size); } @@ -203,25 +199,25 @@ struct ggml_cann_pool_alloc { * @brief Gets the pointer to the allocated memory block. * @return Pointer to the allocated memory block. */ - void* get() { return ptr; } + void * get() { return ptr; } // Deleted copy constructor - ggml_cann_pool_alloc(const ggml_cann_pool_alloc&) = delete; + ggml_cann_pool_alloc(const ggml_cann_pool_alloc &) = delete; // Deleted move constructor - ggml_cann_pool_alloc(ggml_cann_pool_alloc&&) = delete; + ggml_cann_pool_alloc(ggml_cann_pool_alloc &&) = delete; // Deleted copy assignment operator - ggml_cann_pool_alloc& operator=(const ggml_cann_pool_alloc&) = delete; + ggml_cann_pool_alloc & operator=(const ggml_cann_pool_alloc &) = delete; // Deleted move assignment operator - ggml_cann_pool_alloc& operator=(ggml_cann_pool_alloc&&) = delete; + ggml_cann_pool_alloc & operator=(ggml_cann_pool_alloc &&) = delete; }; /** * @brief Function pointer type for ACLNN operator calls. */ -using aclnn_func_t = aclnnStatus (*)(void*, uint64_t, aclOpExecutor*, aclrtStream); +using aclnn_func_t = aclnnStatus (*)(void *, uint64_t, aclOpExecutor *, aclrtStream); /** * @brief Base class for all CANN tasks to be submitted to the task queue. @@ -229,7 +225,7 @@ using aclnn_func_t = aclnnStatus (*)(void*, uint64_t, aclOpExecutor*, aclrtStrea * Users should override the run_task() method with actual task logic. */ class cann_task { -public: + public: virtual void run_task() {} }; @@ -237,16 +233,20 @@ class cann_task { * @brief A lock-free ring-buffer based task queue for asynchronously executing cann_task instances. */ class cann_task_queue { -public: + public: /** * @brief Constructs a task queue with a fixed power-of-two capacity for a specific device. * * @param capacity Queue capacity. Must be a power of 2. 
* @param device Target device ID (used for context setting). */ - explicit cann_task_queue(size_t capacity, int32_t device) - : buffer_(capacity), capacity_(capacity), head_(0), tail_(0), - running_(false), device_(device) { + explicit cann_task_queue(size_t capacity, int32_t device) : + buffer_(capacity), + capacity_(capacity), + head_(0), + tail_(0), + running_(false), + device_(device) { GGML_ASSERT((capacity & (capacity - 1)) == 0 && "capacity must be power of 2"); mask_ = capacity_ - 1; } @@ -257,7 +257,7 @@ class cann_task_queue { * @param item Unique pointer to the task. * @return true if the task was successfully enqueued, false if the queue was full. */ - bool enqueue(std::unique_ptr&& item) { + bool enqueue(std::unique_ptr && item) { size_t next_tail = (tail_ + 1) & mask_; if (next_tail == head_) { @@ -276,17 +276,16 @@ class cann_task_queue { * * @param task Task to be submitted. */ - void submit_task(std::unique_ptr&& task) { - while(!enqueue(std::move(task))) { + void submit_task(std::unique_ptr && task) { + while (!enqueue(std::move(task))) { std::this_thread::yield(); continue; } if (!running_) { running_ = true; - thread_ = std::thread(&cann_task_queue::execute, this); + thread_ = std::thread(&cann_task_queue::execute, this); } - } /** @@ -309,7 +308,7 @@ class cann_task_queue { } } -private: + private: /** * @brief Worker thread function that continuously dequeues and executes tasks. */ @@ -317,7 +316,7 @@ class cann_task_queue { ggml_cann_set_device(device_); while (running_) { - if(head_ == tail_) { + if (head_ == tail_) { std::this_thread::yield(); continue; } @@ -330,24 +329,24 @@ class cann_task_queue { } std::vector> buffer_; - const size_t capacity_; - size_t mask_; - size_t head_; - size_t tail_; - bool running_; - std::thread thread_; - int32_t device_; + const size_t capacity_; + size_t mask_; + size_t head_; + size_t tail_; + bool running_; + std::thread thread_; + int32_t device_; }; #ifdef USE_ACL_GRAPH struct ggml_graph_node_properties { // dst tensor - void * node_address; + void * node_address; int64_t ne[GGML_MAX_DIMS]; - size_t nb[GGML_MAX_DIMS]; + size_t nb[GGML_MAX_DIMS]; // src tensor - void * src_address[GGML_MAX_SRC]; + void * src_address[GGML_MAX_SRC]; int64_t src_ne[GGML_MAX_SRC][GGML_MAX_DIMS]; size_t src_nb[GGML_MAX_SRC][GGML_MAX_DIMS]; @@ -376,13 +375,11 @@ struct ggml_cann_graph { * move existing graphs to the front (most recently used), and clear the cache. */ struct ggml_cann_graph_lru_cache { - size_t capacity; /**< Maximum number of graphs in the cache. */ + size_t capacity; /**< Maximum number of graphs in the cache. */ - std::list cache_list; /**< List storing cached graphs as raw pointers. */ + std::list cache_list; /**< List storing cached graphs as raw pointers. */ - ggml_cann_graph_lru_cache() { - capacity = parse_integer(get_env("GGML_CANN_GRAPH_CACHE_CAPACITY").value_or("12")); - } + ggml_cann_graph_lru_cache() { capacity = parse_integer(get_env("GGML_CANN_GRAPH_CACHE_CAPACITY").value_or("12")); } /** * @brief Push a new graph to the front of the cache. @@ -390,11 +387,11 @@ struct ggml_cann_graph_lru_cache { * @param new_node Pointer to the new ggml_cann_graph to cache. * Ownership is transferred to the cache (cache will delete it). 
*/ - void push(ggml_cann_graph* new_node) { + void push(ggml_cann_graph * new_node) { if (cache_list.size() >= capacity) { - ggml_cann_graph* old = cache_list.back(); + ggml_cann_graph * old = cache_list.back(); cache_list.pop_back(); - delete old; // free the old graph + delete old; // free the old graph } cache_list.push_front(new_node); } @@ -403,7 +400,7 @@ struct ggml_cann_graph_lru_cache { * @brief Move an existing graph to the front of the cache. * @param node Pointer to the ggml_cann_graph to move. */ - void move_to_front(ggml_cann_graph* node) { + void move_to_front(ggml_cann_graph * node) { cache_list.remove(node); cache_list.push_front(node); } @@ -421,92 +418,89 @@ struct ggml_cann_graph_lru_cache { /** * @brief Destructor that clears the cache and frees all cached graphs. */ - ~ggml_cann_graph_lru_cache() { - clear(); - } + ~ggml_cann_graph_lru_cache() { clear(); } }; #endif // USE_ACL_GRAPH struct ggml_cann_rope_cache { ~ggml_cann_rope_cache() { - if(theta_scale_cache != nullptr) { + if (theta_scale_cache != nullptr) { ACL_CHECK(aclrtFree(theta_scale_cache)); } - if(sin_cache != nullptr) { + if (sin_cache != nullptr) { ACL_CHECK(aclrtFree(sin_cache)); } - if(cos_cache != nullptr) { + if (cos_cache != nullptr) { ACL_CHECK(aclrtFree(cos_cache)); } } - void* theta_scale_cache = nullptr; + void * theta_scale_cache = nullptr; int64_t theta_scale_length = 0; // sin/cos cache, used only to accelerate first layer on each device - void* sin_cache = nullptr; - void* cos_cache = nullptr; - int64_t position_length = 0; + void * sin_cache = nullptr; + void * cos_cache = nullptr; + int64_t position_length = 0; // Properties to check before reusing the sincos cache - bool cached = false; - float ext_factor = 0.0f; - float theta_scale = 0.0f; - float freq_scale = 0.0f; - float attn_factor = 0.0f; - bool is_neox = false; + bool cached = false; + float ext_factor = 0.0f; + float theta_scale = 0.0f; + float freq_scale = 0.0f; + float attn_factor = 0.0f; + bool is_neox = false; }; struct ggml_cann_tensor_cache { ~ggml_cann_tensor_cache() { - if(cache != nullptr) { + if (cache != nullptr) { ACL_CHECK(aclrtFree(cache)); } } - void* cache = nullptr; - int64_t size = 0; + void * cache = nullptr; + int64_t size = 0; }; /** * @brief Context for managing CANN backend operations. */ struct ggml_backend_cann_context { - int32_t device; /**< Device ID. */ - std::string name; /**< Name of the device. */ - std::string description; /**< Description of the device. */ - aclrtEvent copy_event = nullptr; /**< Event for managing copy operations. */ + int32_t device; /**< Device ID. */ + std::string name; /**< Name of the device. */ + std::string description; /**< Description of the device. */ + aclrtEvent copy_event = nullptr; /**< Event for managing copy operations. */ #ifdef USE_ACL_GRAPH /// Cached CANN ACL graph used for executing the current ggml computation graph. ggml_cann_graph_lru_cache graph_lru_cache; - bool acl_graph_mode = true; + bool acl_graph_mode = true; #endif - cann_task_queue task_queue; - bool async_mode; + cann_task_queue task_queue; + bool async_mode; // Rope Cache - ggml_cann_rope_cache rope_cache; + ggml_cann_rope_cache rope_cache; // Constant Pool ggml_cann_tensor_cache rms_norm_one_tensor_cache; ggml_cann_tensor_cache rms_norm_zero_tensor_cache; - aclrtStream streams[GGML_CANN_MAX_STREAMS] = {nullptr}; /**< Array of streams for the device. */ + aclrtStream streams[GGML_CANN_MAX_STREAMS] = { nullptr }; /**< Array of streams for the device. 
*/ /** * @brief Constructor for initializing the context with a given device. * @param device Device ID. */ - explicit ggml_backend_cann_context(int device) - : device(device), name("CANN" + std::to_string(device)), task_queue(1024, device) { + explicit ggml_backend_cann_context(int device) : + device(device), + name("CANN" + std::to_string(device)), + task_queue(1024, device) { ggml_cann_set_device(device); description = aclrtGetSocName(); async_mode = parse_bool(get_env("GGML_CANN_ASYNC_MODE").value_or("")); - GGML_LOG_INFO("%s: device %d async operator submission is %s\n", __func__, - device, async_mode ? "ON" : "OFF"); + GGML_LOG_INFO("%s: device %d async operator submission is %s\n", __func__, device, async_mode ? "ON" : "OFF"); #ifdef USE_ACL_GRAPH acl_graph_mode = parse_bool(get_env("GGML_CANN_ACL_GRAPH").value_or("on")); - GGML_LOG_INFO("%s: device %d execution mode is %s (%s)\n", - __func__, device, - acl_graph_mode ? "GRAPH" : "EAGER", - acl_graph_mode ? "acl graph enabled" : "acl graph disabled"); + GGML_LOG_INFO("%s: device %d execution mode is %s (%s)\n", __func__, device, acl_graph_mode ? "GRAPH" : "EAGER", + acl_graph_mode ? "acl graph enabled" : "acl graph disabled"); #endif } @@ -549,8 +543,7 @@ struct ggml_backend_cann_context { aclrtStream stream() { return stream(0); } // TODO: each stream should have a memory pool. - std::unique_ptr - mem_pool; /**< Memory pool for the device. */ + std::unique_ptr mem_pool; /**< Memory pool for the device. */ /** * @brief Create a new memory pool for a given device. @@ -563,7 +556,7 @@ struct ggml_backend_cann_context { * @brief Get or create the memory pool for the context. * @return Reference to the memory pool. */ - ggml_cann_pool& pool() { + ggml_cann_pool & pool() { if (mem_pool == nullptr) { mem_pool = new_pool_for_device(device); } diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp old mode 100755 new mode 100644 index ad1adba6b3a8a..8bd5449f1f75f --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -56,14 +56,12 @@ * @param line The line number where the error occurred. * @param msg The error message. */ -[[noreturn]] void ggml_cann_error(const char* stmt, const char* func, - const char* file, int line, const char* msg) { +[[noreturn]] void ggml_cann_error(const char * stmt, const char * func, const char * file, int line, const char * msg) { int32_t id = -1; aclrtGetDevice(&id); GGML_LOG_ERROR("CANN error: %s\n", msg); - GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func, - file, line); + GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func, file, line); GGML_LOG_ERROR(" %s\n", stmt); // abort with GGML_ASSERT to get a stack trace GGML_ABORT("CANN error"); @@ -79,7 +77,7 @@ void ggml_cann_set_device(const int32_t device) { aclrtGetDevice(¤t_device); if (device == current_device) { - return; + return; } ACL_CHECK(aclrtSetDevice(device)); } @@ -99,9 +97,11 @@ int32_t ggml_cann_get_device() { * @brief Get the value of the specified environment variable (name). 
* if not empty, return a std::string object */ -std::optional get_env(const std::string& name) { - const char* val = std::getenv(name.c_str()); - if (!val) return std::nullopt; +std::optional get_env(const std::string & name) { + const char * val = std::getenv(name.c_str()); + if (!val) { + return std::nullopt; + } std::string res = std::string(val); std::transform(res.begin(), res.end(), res.begin(), ::tolower); return res; @@ -110,8 +110,8 @@ std::optional get_env(const std::string& name) { /** * @brief Verify whether the environment variable is a valid value. */ -bool parse_bool(const std::string& value) { - std::unordered_set valid_values = {"on", "1", "yes", "y", "enable", "true"}; +bool parse_bool(const std::string & value) { + std::unordered_set valid_values = { "on", "1", "yes", "y", "enable", "true" }; return valid_values.find(value) != valid_values.end(); } @@ -125,7 +125,7 @@ bool parse_bool(const std::string& value) { * @param value The string to parse. * @return The parsed integer, or 0 if conversion fails. */ -int parse_integer(const std::string& value) { +int parse_integer(const std::string & value) { try { return std::stoi(value); } catch (...) { @@ -144,11 +144,10 @@ int parse_integer(const std::string& value) { static ggml_cann_device_info ggml_cann_init() { ggml_cann_device_info info = {}; - aclError err = aclrtGetDeviceCount((uint32_t*)&info.device_count); + aclError err = aclrtGetDeviceCount((uint32_t *) &info.device_count); if (err != ACL_SUCCESS) { - GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n", - __func__, aclGetRecentErrMsg()); + GGML_LOG_ERROR("%s: failed to initialize CANN: %s\n", __func__, aclGetRecentErrMsg()); return info; } @@ -156,16 +155,15 @@ static ggml_cann_device_info ggml_cann_init() { for (int id = 0; id < info.device_count; ++id) { aclrtPhysicalMemProp prop = {}; - prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; - prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; - prop.memAttr = ACL_HBM_MEM_HUGE; - prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; - prop.location.id = id; - prop.reserve = 0; - err = aclrtMemGetAllocationGranularity( - &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED, - &info.devices[id].vmm_granularity); - info.devices[id].vmm = err == ACL_SUCCESS; + prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; + prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; + prop.memAttr = ACL_HBM_MEM_HUGE; + prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; + prop.location.id = id; + prop.reserve = 0; + err = aclrtMemGetAllocationGranularity(&prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED, + &info.devices[id].vmm_granularity); + info.devices[id].vmm = err == ACL_SUCCESS; size_t free, total; ggml_backend_cann_get_device_memory(id, &free, &total); @@ -185,7 +183,7 @@ static ggml_cann_device_info ggml_cann_init() { * * @return A reference to the structure containing the device information. */ -const ggml_cann_device_info& ggml_cann_info() { +const ggml_cann_device_info & ggml_cann_info() { static ggml_cann_device_info info = ggml_cann_init(); return info; } @@ -205,7 +203,7 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { /** * @brief The minimum free margin for a buffer. */ - static const size_t min_free_margin = 1ull << 20; // 1MB + static const size_t min_free_margin = 1ull << 20; // 1MB /** * @brief The alignment for buffer allocation. @@ -226,22 +224,18 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { * @brief Structure representing a CANN buffer. 
*/ struct ggml_cann_buffer { - void* ptr = nullptr; ///< Pointer to the buffer. - size_t size = 0; ///< Size of the buffer. - std::chrono::steady_clock::time_point last_used; ///< Last used time. + void * ptr = nullptr; ///< Pointer to the buffer. + size_t size = 0; ///< Size of the buffer. + std::chrono::steady_clock::time_point last_used; ///< Last used time. - bool operator>(const ggml_cann_buffer& other) const { - return size > other.size; - } + bool operator>(const ggml_cann_buffer & other) const { return size > other.size; } }; /** * @brief Array of CANN buffers in the pool. */ - std::unordered_map buffer_pool; - std::priority_queue, - std::greater<>> free_buffers ; + std::unordered_map buffer_pool; + std::priority_queue, std::greater<>> free_buffers; /** * @brief Total size of all buffers in the pool. @@ -262,7 +256,7 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { */ ~ggml_cann_pool_buf_prio() { ggml_cann_set_device(device); - for (auto& [b_ptr, b_size] : buffer_pool) { + for (auto & [b_ptr, b_size] : buffer_pool) { aclrtFree(b_ptr); pool_size -= b_size; } @@ -278,14 +272,14 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { * the allocated buffer. * @return A pointer to the allocated buffer. */ - void* alloc(size_t size, size_t* actual_size) override { + void * alloc(size_t size, size_t * actual_size) override { size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } - void* ptr = nullptr; - auto now = std::chrono::steady_clock::now(); + void * ptr = nullptr; + auto now = std::chrono::steady_clock::now(); std::vector free_buffers_rest; free_buffers_rest.reserve(free_buffers.size()); @@ -298,24 +292,22 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { const size_t margin = b.size - size; if (margin <= max_reuse_margin) { *actual_size = b.size; - ptr = b.ptr; + ptr = b.ptr; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: reused %p, " "pool_size = %5u MB, " "size = %5u MB, " "margin = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(margin, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(margin, 1048576) / 1048576)); #endif break; } } - bool should_clean = !disable_clean && - b.size > min_free_margin && + bool should_clean = !disable_clean && b.size > min_free_margin && std::chrono::duration_cast(now - b.last_used).count() > 100; if (should_clean) { // free the buffer if the size is needed to be freed @@ -327,20 +319,20 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { "cann pool[%d]: clean %p, " "pool_size = %5u MB, " "size = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif continue; } free_buffers_rest.push_back(b); } - for (ggml_cann_buffer &b : free_buffers_rest) { + for (ggml_cann_buffer & b : free_buffers_rest) { free_buffers.push(std::move(b)); } #ifdef DEBUG_CANN_MALLOC - GGML_LOG_INFO("cann pool[%d] free pool_size = %5u MB\n\n", device, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576)); + GGML_LOG_INFO("cann pool[%d] free pool_size = %5u MB\n\n", device, + (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif if (ptr != nullptr) { return ptr; @@ -356,8 +348,8 @@ struct 
ggml_cann_pool_buf_prio : public ggml_cann_pool { "cann pool[%d]: allocate %p, " "pool_size = %5u MB, " "size = %5u MB\n", - device, ptr, (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(size, 1048576) / 1048576)); + device, ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(size, 1048576) / 1048576)); #endif buffer_pool.emplace(ptr, size); return ptr; @@ -369,7 +361,7 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. */ - void free(void* ptr, size_t size) override { + void free(void * ptr, size_t size) override { GGML_UNUSED(size); auto it = buffer_pool.find(ptr); if (it == buffer_pool.end()) { @@ -377,13 +369,12 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { } auto now = std::chrono::steady_clock::now(); - free_buffers.emplace(ggml_cann_buffer{ptr, it->second, now}); + free_buffers.emplace(ggml_cann_buffer{ ptr, it->second, now }); #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: return %p, " "pool_size = %5u MB\n", - device, ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576)); + device, ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif } }; @@ -402,7 +393,7 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { /** * @brief The minimum free margin for a buffer. */ - static const size_t min_free_margin = 1ull << 20; // 1MB + static const size_t min_free_margin = 1ull << 20; // 1MB /** * @brief The alignment for buffer allocation. @@ -428,10 +419,10 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { * @brief Structure representing a CANN buffer. */ struct ggml_cann_buffer { - void* ptr = nullptr; ///< Pointer to the buffer memory. - size_t size = 0; ///< Size of the buffer. - bool used = false; ///< Whether the buffer is currently in use. - std::chrono::steady_clock::time_point last_used; ///< Last used time. + void * ptr = nullptr; ///< Pointer to the buffer memory. + size_t size = 0; ///< Size of the buffer. + bool used = false; ///< Whether the buffer is currently in use. + std::chrono::steady_clock::time_point last_used; ///< Last used time. }; /** @@ -459,7 +450,7 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { ~ggml_cann_pool_buf() { ggml_cann_set_device(device); for (int i = 0; i < MAX_BUFFERS; ++i) { - ggml_cann_buffer& b = buffer_pool[i]; + ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr != nullptr) { aclrtFree(b.ptr); pool_size -= b.size; @@ -476,18 +467,18 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { * the allocated buffer. * @return A pointer to the allocated buffer. 
*/ - void* alloc(size_t size, size_t* actual_size) override { + void * alloc(size_t size, size_t * actual_size) override { size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } - void* ptr = nullptr; - auto now = std::chrono::steady_clock::now(); + void * ptr = nullptr; + auto now = std::chrono::steady_clock::now(); int i = 0; for (; i < MAX_BUFFERS; ++i) { - ggml_cann_buffer& b = buffer_pool[i]; + ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr == nullptr) { break; } @@ -499,25 +490,23 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { const size_t margin = b.size - size; if (margin <= max_reuse_margin) { *actual_size = b.size; - b.used = true; - ptr = b.ptr; + b.used = true; + ptr = b.ptr; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: reused %p, " "pool_size = %5u MB, " "size = %5u MB, " "margin = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(margin, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(margin, 1048576) / 1048576)); #endif break; } } - bool should_clean = !disable_clean && - b.size > min_free_margin && + bool should_clean = !disable_clean && b.size > min_free_margin && std::chrono::duration_cast(now - b.last_used).count() > 100; if (should_clean) { // free the buffer if the size is needed to be freed @@ -528,9 +517,8 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { "cann pool[%d]: clean %p, " "pool_size = %5u MB, " "size = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif b.ptr = nullptr; } @@ -541,13 +529,13 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { if (i < MAX_BUFFERS) { // allocate a new buffer if no buffer can be reused - ggml_cann_buffer& b = buffer_pool[i]; + ggml_cann_buffer & b = buffer_pool[i]; ggml_cann_set_device(device); ACL_CHECK(aclrtMalloc(&b.ptr, size, ACL_MEM_MALLOC_HUGE_FIRST)); pool_size += size; *actual_size = size; - b.size = size; - b.used = true; + b.size = size; + b.used = true; if (i >= MAX_BUFFERS - 8) { GGML_LOG_WARN("cann pool[%d]: slots almost full\n", device); } @@ -556,9 +544,8 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { "cann pool[%d]: allocate %p, " "pool_size = %5u MB, " "size = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576), - (uint32_t)(GGML_PAD(b.size, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576), + (uint32_t) (GGML_PAD(b.size, 1048576) / 1048576)); #endif return b.ptr; } @@ -572,21 +559,20 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. 
*/ - void free(void* ptr, size_t size) override { + void free(void * ptr, size_t size) override { GGML_UNUSED(size); for (int i = 0; i < MAX_BUFFERS; ++i) { - ggml_cann_buffer& b = buffer_pool[i]; + ggml_cann_buffer & b = buffer_pool[i]; if (b.ptr != ptr) { continue; } - b.used = false; + b.used = false; b.last_used = std::chrono::steady_clock::now(); #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "cann pool[%d]: return %p, " "pool_size = %5u MB\n", - device, b.ptr, - (uint32_t)(GGML_PAD(pool_size, 1048576) / 1048576)); + device, b.ptr, (uint32_t) (GGML_PAD(pool_size, 1048576) / 1048576)); #endif return; } @@ -614,7 +600,7 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { /** * @brief Pointer to the start of the virtual memory pool. */ - void* pool_addr = 0; + void * pool_addr = 0; /** * @brief Amount of virtual memory used in the pool. @@ -639,7 +625,7 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { /** * @brief Offsets for the mapped memory regions. */ - std::vector map_offsets; + std::vector map_offsets; /** * @brief Constructor to initialize the buffer pool with virtual memory for @@ -647,11 +633,10 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { * * @param device The device ID to associate with this buffer pool. */ - explicit ggml_cann_pool_vmm(int device) - : device(device) { - auto dev = ggml_cann_info().devices[device]; + explicit ggml_cann_pool_vmm(int device) : device(device) { + auto dev = ggml_cann_info().devices[device]; granularity = dev.vmm_granularity; - max_size = dev.total_vram; + max_size = dev.total_vram; } /** @@ -659,10 +644,10 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { */ ~ggml_cann_pool_vmm() { if (pool_addr != 0) { - for (auto& offset : map_offsets) { + for (auto & offset : map_offsets) { ACL_CHECK(aclrtUnmapMem(offset)); } - for (auto& handle : handles) { + for (auto & handle : handles) { ACL_CHECK(aclrtFreePhysical(handle)); } ACL_CHECK(aclrtReleaseMemAddress(pool_addr)); @@ -677,11 +662,11 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { * the allocated buffer. * @return A pointer to the allocated buffer. 
*/ - void* alloc(size_t size, size_t* actual_size) override { + void * alloc(size_t size, size_t * actual_size) override { // round up the allocation size to the alignment to ensure that all // allocations are aligned for all data types const size_t alignment = 128; - size = GGML_PAD(size, alignment); + size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } @@ -691,53 +676,51 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { if (size > avail) { // round up to the next multiple of the granularity size_t reserve_size = size - avail; - reserve_size = GGML_PAD(reserve_size, granularity); + reserve_size = GGML_PAD(reserve_size, granularity); GGML_ASSERT(pool_size + reserve_size <= max_size); // allocate more physical memory aclrtPhysicalMemProp prop = {}; - prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; - prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; - prop.memAttr = ACL_HBM_MEM_HUGE; - prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; - prop.location.id = device; - prop.reserve = 0; + prop.handleType = ACL_MEM_HANDLE_TYPE_NONE; + prop.allocationType = ACL_MEM_ALLOCATION_TYPE_PINNED; + prop.memAttr = ACL_HBM_MEM_HUGE; + prop.location.type = ACL_MEM_LOCATION_TYPE_DEVICE; + prop.location.id = device; + prop.reserve = 0; aclrtDrvMemHandle handle; ACL_CHECK(aclrtMallocPhysical(&handle, reserve_size, &prop, 0)); // reserve virtual address space (if not already reserved) if (pool_addr == 0) { - ACL_CHECK(aclrtReserveMemAddress( - &pool_addr, max_size, 0, NULL, 1)); + ACL_CHECK(aclrtReserveMemAddress(&pool_addr, max_size, 0, NULL, 1)); } // map at the end of the pool - ACL_CHECK(aclrtMapMem((char*)pool_addr + pool_size, reserve_size, 0, - handle, 0)); + ACL_CHECK(aclrtMapMem((char *) pool_addr + pool_size, reserve_size, 0, handle, 0)); handles.push_back(handle); - map_offsets.push_back((char*)pool_addr + pool_size); + map_offsets.push_back((char *) pool_addr + pool_size); // add to the pool pool_size += reserve_size; #ifdef DEBUG_CANN_MALLOC - GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n", - device, (unsigned long long) (pool_size/1024/1024), - (unsigned long long) (reserve_size/1024/1024)); + GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n", device, + (unsigned long long) (pool_size / 1024 / 1024), + (unsigned long long) (reserve_size / 1024 / 1024)); #endif } GGML_ASSERT(pool_addr != 0); - void* ptr = (void*)((char*)pool_addr + pool_used); + void * ptr = (void *) ((char *) pool_addr + pool_used); *actual_size = size; pool_used += size; #ifdef DEBUG_CANN_MALLOC - GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device, - (unsigned long long)size, (unsigned long long)ptr); + GGML_LOG_INFO("cann pool[%d]: allocated %llu bytes at %llx\n", device, (unsigned long long) size, + (unsigned long long) ptr); #endif return ptr; } @@ -748,16 +731,16 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { * @param ptr Pointer to the buffer to free. * @param size Size of the buffer to free. 
*/ - void free(void* ptr, size_t size) override { + void free(void * ptr, size_t size) override { #ifdef DEBUG_CANN_MALLOC - GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device, - (unsigned long long)size, (unsigned long long)ptr); + GGML_LOG_INFO("cann pool[%d]: freed %llu bytes at %llx\n", device, (unsigned long long) size, + (unsigned long long) ptr); #endif pool_used -= size; // all deallocations must be in reverse order of the allocations - GGML_ASSERT(ptr == (void*)((char*)pool_addr + pool_used)); + GGML_ASSERT(ptr == (void *) ((char *) pool_addr + pool_used)); } }; @@ -769,8 +752,7 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { * @param device The device ID for which to create the pool. * @return A unique pointer to the created CANN pool. */ -std::unique_ptr ggml_backend_cann_context::new_pool_for_device( - int device) { +std::unique_ptr ggml_backend_cann_context::new_pool_for_device(int device) { std::string mem_pool_type = get_env("GGML_CANN_MEM_POOL").value_or(""); if (mem_pool_type == "prio") { @@ -795,9 +777,8 @@ std::unique_ptr ggml_backend_cann_context::new_pool_for_device( * ID, device pointer, and a name derived from GGML_CANN_NAME and the device ID. */ struct ggml_backend_cann_buffer_context { - int32_t device; ///< The device ID associated with this buffer context. - void* dev_ptr = - nullptr; ///< Pointer to the device memory allocated for the buffer. + int32_t device; ///< The device ID associated with this buffer context. + void * dev_ptr = nullptr; ///< Pointer to the device memory allocated for the buffer. /** * @brief Constructor to initialize the CANN buffer context. @@ -805,9 +786,7 @@ struct ggml_backend_cann_buffer_context { * @param device The device ID associated with this buffer context. * @param dev_ptr Pointer to the device memory allocated for the buffer. */ - ggml_backend_cann_buffer_context(int32_t device, void* dev_ptr) - : device(device), - dev_ptr(dev_ptr) {} + ggml_backend_cann_buffer_context(int32_t device, void * dev_ptr) : device(device), dev_ptr(dev_ptr) {} /** * @brief Destructor to free the device memory allocated for the buffer. @@ -825,8 +804,8 @@ struct ggml_backend_cann_buffer_context { * @return true if the buffer is a CANN buffer, false otherwise. */ static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft); -static bool ggml_backend_buffer_is_cann( - ggml_backend_buffer_t buffer) { + +static bool ggml_backend_buffer_is_cann(ggml_backend_buffer_t buffer) { return ggml_backend_buft_is_cann(buffer->buft); } @@ -838,10 +817,8 @@ static bool ggml_backend_buffer_is_cann( * * @param buffer The CANN buffer to free. */ -static void ggml_backend_cann_buffer_free_buffer( - ggml_backend_buffer_t buffer) { - ggml_backend_cann_buffer_context* ctx = - (ggml_backend_cann_buffer_context*)buffer->context; +static void ggml_backend_cann_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; delete ctx; } @@ -854,10 +831,8 @@ static void ggml_backend_cann_buffer_free_buffer( * @param buffer The CANN buffer whose base pointer is to be retrieved. * @return A pointer to the base of the device memory allocated for the buffer. 
*/ -static void* ggml_backend_cann_buffer_get_base( - ggml_backend_buffer_t buffer) { - ggml_backend_cann_buffer_context* ctx = - (ggml_backend_cann_buffer_context*)buffer->context; +static void * ggml_backend_cann_buffer_get_base(ggml_backend_buffer_t buffer) { + ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; return ctx->dev_ptr; } @@ -874,21 +849,17 @@ static void* ggml_backend_cann_buffer_get_base( * @param dst Pointer to the destination buffer where transformed data will be * stored. */ -static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor, - const void* src, - void* dst) { - - int64_t n_elems = ggml_nelements(tensor); - int64_t groups = n_elems / QK4_0; - size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; +static void ggml_backend_cann_transform_q4_0(ggml_tensor * tensor, const void * src, void * dst) { + int64_t n_elems = ggml_nelements(tensor); + int64_t groups = n_elems / QK4_0; + size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; - uint8_t* quant_offset = (uint8_t*)dst; - uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes); + uint8_t * quant_offset = (uint8_t *) dst; + uint16_t * scale_offset = (uint16_t *) ((char *) dst + quant_bytes); for (int i = 0; i < groups; i++) { - const block_q4_0* group = - (const block_q4_0*)((const char*)src + i * sizeof(block_q4_0)); - *scale_offset = group->d; + const block_q4_0 * group = (const block_q4_0 *) ((const char *) src + i * sizeof(block_q4_0)); + *scale_offset = group->d; scale_offset++; // 0-15 @@ -907,8 +878,7 @@ static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor, } // put (uint4b_t -8) into int4b_t - for (quant_offset = (uint8_t*)dst; - quant_offset < (uint8_t*)dst + quant_bytes; quant_offset++) { + for (quant_offset = (uint8_t *) dst; quant_offset < (uint8_t *) dst + quant_bytes; quant_offset++) { (*quant_offset) ^= 0x88; } } @@ -926,29 +896,27 @@ static void ggml_backend_cann_transform_q4_0(ggml_tensor* tensor, * @param dst Pointer to the destination buffer where the Q4.0 formatted data * will be stored. 
*/ -static void ggml_backend_cann_transform_back_q4_0( - const ggml_tensor* tensor, void* src, void* dst) { +static void ggml_backend_cann_transform_back_q4_0(const ggml_tensor * tensor, void * src, void * dst) { + int64_t n_elems = ggml_nelements(tensor); + int64_t groups = n_elems / QK4_0; + size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; - int64_t n_elems = ggml_nelements(tensor); - int64_t groups = n_elems / QK4_0; - size_t quant_bytes = n_elems * sizeof(uint8_t) / 2; + uint8_t * quant_offset = (uint8_t *) src; + uint16_t * scale_offset = (uint16_t *) ((char *) src + quant_bytes); - uint8_t* quant_offset = (uint8_t*)src; - uint16_t* scale_offset = (uint16_t*)((char*)src + quant_bytes); - - for (; quant_offset < (uint8_t*)src + quant_bytes; quant_offset++) { + for (; quant_offset < (uint8_t *) src + quant_bytes; quant_offset++) { (*quant_offset) ^= 0x88; } - quant_offset = (uint8_t*)src; + quant_offset = (uint8_t *) src; for (int i = 0; i < groups; i++) { - block_q4_0* group = (block_q4_0*)((char*)dst + i * sizeof(block_q4_0)); - group->d = *scale_offset; + block_q4_0 * group = (block_q4_0 *) ((char *) dst + i * sizeof(block_q4_0)); + group->d = *scale_offset; scale_offset++; // 0-15 for (int j = 0; j < QK4_0 / 2; j += 2) { - group->qs[j] = ((*quant_offset) & 0x0F); + group->qs[j] = ((*quant_offset) & 0x0F); group->qs[j + 1] = ((*quant_offset) >> 4); quant_offset++; } @@ -975,20 +943,17 @@ static void ggml_backend_cann_transform_back_q4_0( * @param dst Pointer to the destination buffer where transformed data will be * stored. */ -static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor, - const void* src, - void* dst) { - int64_t n_elems = ggml_nelements(tensor); - int64_t groups = n_elems / QK8_0; - size_t quant_bytes = n_elems * sizeof(uint8_t); +static void ggml_backend_cann_transform_q8_0(ggml_tensor * tensor, const void * src, void * dst) { + int64_t n_elems = ggml_nelements(tensor); + int64_t groups = n_elems / QK8_0; + size_t quant_bytes = n_elems * sizeof(uint8_t); - uint8_t* quant_offset = (uint8_t*)dst; - uint16_t* scale_offset = (uint16_t*)((char*)dst + quant_bytes); + uint8_t * quant_offset = (uint8_t *) dst; + uint16_t * scale_offset = (uint16_t *) ((char *) dst + quant_bytes); for (int i = 0; i < groups; i++) { - const block_q8_0* group = - (const block_q8_0*)((const char*)src + i * sizeof(block_q8_0)); - *scale_offset = group->d; + const block_q8_0 * group = (const block_q8_0 *) ((const char *) src + i * sizeof(block_q8_0)); + *scale_offset = group->d; scale_offset++; size_t group_quant_size = QK8_0 * sizeof(uint8_t); memcpy(quant_offset, group->qs, group_quant_size); @@ -1009,19 +974,17 @@ static void ggml_backend_cann_transform_q8_0(ggml_tensor* tensor, * @param dst Pointer to the destination buffer where the Q8.0 formatted data * will be stored. 
*/ -static void ggml_backend_cann_transform_back_q8_0( - const ggml_tensor* tensor, const void* src, void* dst) { - int64_t n_elems = ggml_nelements(tensor); - int64_t groups = n_elems / QK8_0; - size_t quant_bytes = n_elems * sizeof(uint8_t); +static void ggml_backend_cann_transform_back_q8_0(const ggml_tensor * tensor, const void * src, void * dst) { + int64_t n_elems = ggml_nelements(tensor); + int64_t groups = n_elems / QK8_0; + size_t quant_bytes = n_elems * sizeof(uint8_t); - const uint8_t* quant_offset = (const uint8_t*)src; - const uint16_t* scale_offset = - (const uint16_t*)((const char*)src + quant_bytes); + const uint8_t * quant_offset = (const uint8_t *) src; + const uint16_t * scale_offset = (const uint16_t *) ((const char *) src + quant_bytes); for (int i = 0; i < groups; i++) { - block_q8_0* group = (block_q8_0*)((char*)dst + i * sizeof(block_q8_0)); - group->d = *scale_offset; + block_q8_0 * group = (block_q8_0 *) ((char *) dst + i * sizeof(block_q8_0)); + group->d = *scale_offset; scale_offset++; size_t group_quant_size = QK8_0 * sizeof(uint8_t); memcpy(group->qs, quant_offset, group_quant_size); @@ -1041,8 +1004,7 @@ static void ggml_backend_cann_transform_back_q8_0( * @param dst Pointer to the destination buffer where transformed data will be * stored. */ -static void ggml_backend_cann_transform(ggml_tensor* tensor, - const void* src, void* dst) { +static void ggml_backend_cann_transform(ggml_tensor * tensor, const void * src, void * dst) { switch (tensor->type) { case GGML_TYPE_Q4_0: ggml_backend_cann_transform_q4_0(tensor, src, dst); @@ -1067,8 +1029,7 @@ static void ggml_backend_cann_transform(ggml_tensor* tensor, * @param dst Pointer to the destination buffer where transformed tensor data * will be stored. */ -static void ggml_backend_cann_transform_back( - const ggml_tensor* tensor, void* src, void* dst) { +static void ggml_backend_cann_transform_back(const ggml_tensor * tensor, void * src, void * dst) { switch (tensor->type) { case GGML_TYPE_Q4_0: ggml_backend_cann_transform_back_q4_0(tensor, src, dst); @@ -1109,8 +1070,7 @@ static bool need_transform(ggml_type type) { * @param buffer The CANN buffer from which to initialize the tensor. * @param tensor Pointer to the tensor to be initialized. */ -static enum ggml_status ggml_backend_cann_buffer_init_tensor( - ggml_backend_buffer_t buffer, ggml_tensor* tensor) { +static enum ggml_status ggml_backend_cann_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { if (tensor->view_src != NULL && tensor->view_offs == 0) { GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; @@ -1121,13 +1081,11 @@ static enum ggml_status ggml_backend_cann_buffer_init_tensor( if (ggml_is_quantized(tensor->type)) { // Initialize padding to 0 to avoid possible NaN values size_t original_size = ggml_nbytes(tensor); - size_t padded_size = - ggml_backend_buft_get_alloc_size(buffer->buft, tensor); + size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor); if (padded_size > original_size && tensor->view_src == nullptr) { size_t memset_size = padded_size - original_size; - ACL_CHECK(aclrtMemset((char*)tensor->data + original_size, - memset_size, 0, memset_size)); + ACL_CHECK(aclrtMemset((char *) tensor->data + original_size, memset_size, 0, memset_size)); } } return GGML_STATUS_SUCCESS; @@ -1141,8 +1099,8 @@ static enum ggml_status ggml_backend_cann_buffer_init_tensor( * designed to be used with a global array, one per device. 
*/ struct ggml_cann_nz_workspace { - void* ptr; // Pointer to allocated device buffer - size_t allocated; // Size of currently allocated buffer in bytes + void * ptr; // Pointer to allocated device buffer + size_t allocated; // Size of currently allocated buffer in bytes /** * @brief Constructor. Initializes the workspace with no allocated memory. @@ -1158,7 +1116,7 @@ struct ggml_cann_nz_workspace { void clear() { if (ptr) { ACL_CHECK(aclrtFree(ptr)); - ptr = nullptr; + ptr = nullptr; allocated = 0; } } @@ -1185,7 +1143,7 @@ struct ggml_cann_nz_workspace { * * @return Pointer to the allocated buffer, or nullptr if not allocated. */ - void* get() const { return ptr; } + void * get() const { return ptr; } }; /** @@ -1207,19 +1165,17 @@ static ggml_cann_nz_workspace g_nz_workspaces[GGML_CANN_MAX_DEVICES]; * @note The workspace buffer used in this function is managed globally and reused * across calls. This reduces overhead from repeated memory allocation and deallocation. */ -static void weight_format_to_nz(ggml_tensor *tensor, size_t offset, int device) { - aclTensor* weightTransposed = ggml_cann_create_tensor(tensor, tensor->ne, - tensor->nb, 2, ACL_FORMAT_ND, offset); - uint64_t workspaceSize = 0; - aclOpExecutor *executor; +static void weight_format_to_nz(ggml_tensor * tensor, size_t offset, int device) { + aclTensor * weightTransposed = ggml_cann_create_tensor(tensor, tensor->ne, tensor->nb, 2, ACL_FORMAT_ND, offset); + uint64_t workspaceSize = 0; + aclOpExecutor * executor; // TransMatmulWeight - ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed, - &workspaceSize, &executor)); + ACL_CHECK(aclnnTransMatmulWeightGetWorkspaceSize(weightTransposed, &workspaceSize, &executor)); // Avoid frequent malloc/free of the workspace. g_nz_workspaces[device].realloc(workspaceSize); - void* g_nz_workspace = g_nz_workspaces[device].get(); + void * g_nz_workspace = g_nz_workspaces[device].get(); ACL_CHECK(aclnnTransMatmulWeight(g_nz_workspace, workspaceSize, executor, nullptr)); ACL_CHECK(aclDestroyTensor(weightTransposed)); @@ -1238,11 +1194,12 @@ static void weight_format_to_nz(ggml_tensor *tensor, size_t offset, int device) * @param offset Offset in the source data from where to start copying. * @param size Size of the data to be copied, in bytes. */ -static void ggml_backend_cann_buffer_set_tensor( - ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data, - size_t offset, size_t size) { - ggml_backend_cann_buffer_context *ctx = - (ggml_backend_cann_buffer_context *)buffer->context; +static void ggml_backend_cann_buffer_set_tensor(ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); // TODO: refer to cann(#6017), it use thread's default stream. @@ -1252,20 +1209,17 @@ static void ggml_backend_cann_buffer_set_tensor( // Only check env once. 
static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); if (!need_transform(tensor->type)) { - ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, data, size, - ACL_MEMCPY_HOST_TO_DEVICE)); - if (weight_to_nz && is_matmul_weight((const ggml_tensor*)tensor)) { + ACL_CHECK(aclrtMemcpy((char *) tensor->data + offset, size, data, size, ACL_MEMCPY_HOST_TO_DEVICE)); + if (weight_to_nz && is_matmul_weight((const ggml_tensor *) tensor)) { GGML_ASSERT(tensor->ne[2] == 1); GGML_ASSERT(tensor->ne[3] == 1); weight_format_to_nz(tensor, offset, ctx->device); } } else { - void *transform_buffer = malloc(size); + void * transform_buffer = malloc(size); ggml_backend_cann_transform(tensor, data, transform_buffer); - ACL_CHECK(aclrtMemcpy((char *)tensor->data + offset, size, - transform_buffer, size, - ACL_MEMCPY_HOST_TO_DEVICE)); + ACL_CHECK(aclrtMemcpy((char *) tensor->data + offset, size, transform_buffer, size, ACL_MEMCPY_HOST_TO_DEVICE)); free(transform_buffer); } } @@ -1283,22 +1237,20 @@ static void ggml_backend_cann_buffer_set_tensor( * @param offset Offset in the destination buffer where to start copying. * @param size Size of the data to be copied, in bytes. */ -static void ggml_backend_cann_buffer_get_tensor( - ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data, - size_t offset, size_t size) { - ggml_backend_cann_buffer_context* ctx = - (ggml_backend_cann_buffer_context*)buffer->context; +static void ggml_backend_cann_buffer_get_tensor(ggml_backend_buffer_t buffer, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { + ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); if (!need_transform(tensor->type)) { - ACL_CHECK(aclrtMemcpy(data, size, (char*)tensor->data + offset, size, - ACL_MEMCPY_DEVICE_TO_HOST)); + ACL_CHECK(aclrtMemcpy(data, size, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST)); } else { - void* transform_buffer = malloc(size); - ACL_CHECK(aclrtMemcpy(transform_buffer, size, - (char*)tensor->data + offset, size, - ACL_MEMCPY_DEVICE_TO_HOST)); + void * transform_buffer = malloc(size); + ACL_CHECK(aclrtMemcpy(transform_buffer, size, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST)); ggml_backend_cann_transform_back(tensor, transform_buffer, data); free(transform_buffer); } @@ -1317,19 +1269,17 @@ static void ggml_backend_cann_buffer_get_tensor( * @param dst Pointer to the destination tensor where the data will be copied. * @return true if the copy operation succeeded, false otherwise. */ -static bool ggml_backend_cann_buffer_cpy_tensor( - ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) { +static bool ggml_backend_cann_buffer_cpy_tensor(ggml_backend_buffer_t buffer, + const ggml_tensor * src, + ggml_tensor * dst) { if (ggml_backend_buffer_is_cann(src->buffer)) { - ggml_backend_cann_buffer_context* src_ctx = - (ggml_backend_cann_buffer_context*)src->buffer->context; - ggml_backend_cann_buffer_context* dst_ctx = - (ggml_backend_cann_buffer_context*)buffer->context; + ggml_backend_cann_buffer_context * src_ctx = (ggml_backend_cann_buffer_context *) src->buffer->context; + ggml_backend_cann_buffer_context * dst_ctx = (ggml_backend_cann_buffer_context *) buffer->context; size_t memcpy_size = ggml_nbytes(src); // Same device. 
if (src_ctx->device == dst_ctx->device) { - ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size, - (const char*)src->data, memcpy_size, + ACL_CHECK(aclrtMemcpy((char *) dst->data, memcpy_size, (const char *) src->data, memcpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE)); return true; } else { @@ -1339,13 +1289,11 @@ static bool ggml_backend_cann_buffer_cpy_tensor( #endif // Different device but can access by peer. int32_t canAccessPeer = 0; - ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device, - dst_ctx->device)); + ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, src_ctx->device, dst_ctx->device)); if (canAccessPeer) { ggml_cann_set_device(src_ctx->device); ACL_CHECK(aclrtDeviceEnablePeerAccess(dst_ctx->device, 0)); - ACL_CHECK(aclrtMemcpy((char*)dst->data, memcpy_size, - (const char*)src->data, memcpy_size, + ACL_CHECK(aclrtMemcpy((char *) dst->data, memcpy_size, (const char *) src->data, memcpy_size, ACL_MEMCPY_DEVICE_TO_DEVICE)); return true; } @@ -1363,10 +1311,8 @@ static bool ggml_backend_cann_buffer_cpy_tensor( * @param buffer The CANN buffer to be cleared. * @param value The value to which each byte in the buffer will be set. */ -static void ggml_backend_cann_buffer_clear( - ggml_backend_buffer_t buffer, uint8_t value) { - ggml_backend_cann_buffer_context* ctx = - (ggml_backend_cann_buffer_context*)buffer->context; +static void ggml_backend_cann_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_cann_buffer_context * ctx = (ggml_backend_cann_buffer_context *) buffer->context; ggml_cann_set_device(ctx->device); ACL_CHECK(aclrtMemset(ctx->dev_ptr, buffer->size, value, buffer->size)); @@ -1396,9 +1342,8 @@ static const ggml_backend_buffer_i ggml_backend_cann_buffer_interface = { * buffer type. */ struct ggml_backend_cann_buffer_type_context { - int32_t - device; /**< Device identifier associated with the buffer context. */ - std::string name; /**< Name associated with the buffer context. */ + int32_t device; /**< Device identifier associated with the buffer context. */ + std::string name; /**< Name associated with the buffer context. */ }; /** @@ -1410,10 +1355,8 @@ struct ggml_backend_cann_buffer_type_context { * @param buft Pointer to the buffer type context. * @return Const pointer to the C-style string containing the name. */ -static const char* ggml_backend_cann_buffer_type_name( - ggml_backend_buffer_type_t buft) { - ggml_backend_cann_buffer_type_context* buft_ctx = - (ggml_backend_cann_buffer_type_context*)buft->context; +static const char * ggml_backend_cann_buffer_type_name(ggml_backend_buffer_type_t buft) { + ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; return buft_ctx->name.c_str(); } @@ -1428,34 +1371,27 @@ static const char* ggml_backend_cann_buffer_type_name( * @param size Size in bytes of the buffer to allocate. * @return Pointer to the allocated buffer, or nullptr if allocation fails. 
*/ -static ggml_backend_buffer_t -ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, - size_t size) { - ggml_backend_cann_buffer_type_context* buft_ctx = - (ggml_backend_cann_buffer_type_context*)buft->context; +static ggml_backend_buffer_t ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; ggml_cann_set_device(buft_ctx->device); const size_t alignment = 128; - size = GGML_PAD(size, alignment); + size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } - void* dev_ptr; + void * dev_ptr; aclError err = aclrtMalloc(&dev_ptr, size, ACL_MEM_MALLOC_HUGE_FIRST); if (err != ACL_SUCCESS) { - GGML_LOG_ERROR( - "%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n", - __func__, size / 1024.0 / 1024.0, buft_ctx->device, - aclGetRecentErrMsg()); + GGML_LOG_ERROR("%s: allocating %.2f MiB on device %d: aclrtMalloc failed: %s\n", __func__, + size / 1024.0 / 1024.0, buft_ctx->device, aclGetRecentErrMsg()); return nullptr; } - ggml_backend_cann_buffer_context* ctx = - new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr); + ggml_backend_cann_buffer_context * ctx = new ggml_backend_cann_buffer_context(buft_ctx->device, dev_ptr); - return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface, - ctx, size); + return ggml_backend_buffer_init(buft, ggml_backend_cann_buffer_interface, ctx, size); } /** @@ -1470,8 +1406,7 @@ ggml_backend_cann_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, * @return The alignment requirement in bytes (fixed at 128 bytes for CANN * buffers). */ -static size_t ggml_backend_cann_buffer_type_get_alignment( - ggml_backend_buffer_type_t buft) { +static size_t ggml_backend_cann_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 128; GGML_UNUSED(buft); @@ -1491,10 +1426,10 @@ static size_t ggml_backend_cann_buffer_type_get_alignment( * @return The total allocation size in bytes required for the tensor in the * CANN buffer. */ -static size_t ggml_backend_cann_buffer_type_get_alloc_size( - ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) { - size_t size = ggml_nbytes(tensor); - int64_t ne0 = tensor->ne[0]; +static size_t ggml_backend_cann_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, + const ggml_tensor * tensor) { + size_t size = ggml_nbytes(tensor); + int64_t ne0 = tensor->ne[0]; // Only check env once. static bool weight_to_nz = parse_bool(get_env("GGML_CANN_WEIGHT_NZ").value_or("on")); @@ -1507,19 +1442,17 @@ static size_t ggml_backend_cann_buffer_type_get_alloc_size( // size += (line_size_align_32 - line_size); if (ggml_is_quantized(tensor->type)) { if (ne0 % MATRIX_ROW_PADDING != 0) { - size += ggml_row_size( - tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); + size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); } - } else if (weight_to_nz && is_matmul_weight((const ggml_tensor*)tensor)) { + } else if (weight_to_nz && is_matmul_weight((const ggml_tensor *) tensor)) { // NZ format weight are not support quantized yet. // If ND tensor transform to NZ, size may changed. 
- int64_t shape[] = {tensor->ne[1], tensor->ne[0]}; + int64_t shape[] = { tensor->ne[1], tensor->ne[0] }; GGML_ASSERT(tensor->ne[2] == 1); GGML_ASSERT(tensor->ne[3] == 1); - const aclIntArray *acl_shape = aclCreateIntArray(shape, 2); - size_t new_size; - ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(acl_shape, - ggml_cann_type_mapping(tensor->type), &new_size)); + const aclIntArray * acl_shape = aclCreateIntArray(shape, 2); + size_t new_size; + ACL_CHECK(aclnnCalculateMatmulWeightSizeV2(acl_shape, ggml_cann_type_mapping(tensor->type), &new_size)); ACL_CHECK(aclDestroyIntArray(acl_shape)); size = std::max(size, new_size); } @@ -1560,17 +1493,15 @@ static const ggml_backend_buffer_type_i ggml_backend_cann_buffer_type_interface * @return A pointer to the buffer type interface for the specified device, or * nullptr if the device index is out of range. */ -ggml_backend_buffer_type_t -ggml_backend_cann_buffer_type(int32_t device) { - static std::mutex mutex; +ggml_backend_buffer_type_t ggml_backend_cann_buffer_type(int32_t device) { + static std::mutex mutex; std::lock_guard lock(mutex); if (device >= ggml_backend_cann_get_device_count()) { return nullptr; } - static ggml_backend_buffer_type - ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES]; + static ggml_backend_buffer_type ggml_backend_cann_buffer_types[GGML_CANN_MAX_DEVICES]; static bool ggml_backend_cann_buffer_type_initialized = false; @@ -1580,8 +1511,7 @@ ggml_backend_cann_buffer_type(int32_t device) { /* .iface = */ ggml_backend_cann_buffer_type_interface, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i), /* .context = */ - new ggml_backend_cann_buffer_type_context{ - i, "CANN" + std::to_string(i)}, + new ggml_backend_cann_buffer_type_context{ i, "CANN" + std::to_string(i) }, }; } ggml_backend_cann_buffer_type_initialized = true; @@ -1645,16 +1575,16 @@ static void * ggml_cann_host_malloc(size_t size) { } const size_t alignment = 128; - size = GGML_PAD(size, alignment); + size = GGML_PAD(size, alignment); if (size == 0) { size = alignment; } - void * hostPtr = nullptr; - aclError err = aclrtMallocHost((void **) &hostPtr, size); + void * hostPtr = nullptr; + aclError err = aclrtMallocHost((void **) &hostPtr, size); if (err != ACL_SUCCESS) { - GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, - size / 1024.0 / 1024.0, aclGetRecentErrMsg()); + GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, + aclGetRecentErrMsg()); return nullptr; } return hostPtr; @@ -1667,7 +1597,8 @@ static void * ggml_cann_host_malloc(size_t size) { * @param size Size in bytes of the host buffer to allocate. * @return Pointer to the allocated host buffer, or CPU buffer pointer if allocation fails. 
*/ -static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { +static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, + size_t size) { void * hostPtr = ggml_cann_host_malloc(size); if (hostPtr == nullptr) { @@ -1676,8 +1607,8 @@ static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggm } ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(hostPtr, size); - buffer->buft = buft; - buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free; + buffer->buft = buft; + buffer->iface.free_buffer = ggml_backend_cann_host_buffer_free; return buffer; } @@ -1691,14 +1622,15 @@ static ggml_backend_buffer_t ggml_backend_cann_host_buffer_type_alloc_buffer(ggm ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() { static struct ggml_backend_buffer_type ggml_backend_cann_buffer_type_host = { /* .iface = */ { - /* .get_name = */ ggml_backend_cann_host_buffer_type_name, - /* .alloc_buffer = */ ggml_backend_cann_host_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, - /* .get_max_size = */ NULL, // defaults to SIZE_MAX + /* .get_name = */ ggml_backend_cann_host_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_cann_host_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, + /* .get_max_size = */ NULL, // defaults to SIZE_MAX /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, - /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0), + /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, + }, + /* .device = */ + ggml_backend_reg_dev_get(ggml_backend_cann_reg(), 0), /* .context = */ nullptr, }; @@ -1718,8 +1650,7 @@ ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type() { * stored. * @return true if the computation was successful; false otherwise. 
*/ -static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, - struct ggml_tensor* dst) { +static bool ggml_cann_compute_forward(ggml_backend_cann_context & ctx, struct ggml_tensor * dst) { switch (dst->op) { case GGML_OP_REPEAT: ggml_cann_repeat(ctx, dst); @@ -1765,14 +1696,14 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, case GGML_UNARY_OP_SILU: GGML_CANN_CALL_OP_UNARY(Silu); break; - case GGML_UNARY_OP_GELU_QUICK: { - auto lambda = [](ggml_backend_cann_context& ctx, - aclTensor* acl_src, - aclTensor* acl_dst) { - GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); - }; - ggml_cann_op_unary(lambda, ctx, dst); - } break; + case GGML_UNARY_OP_GELU_QUICK: + { + auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { + GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); + }; + ggml_cann_op_unary(lambda, ctx, dst); + } + break; case GGML_UNARY_OP_TANH: GGML_CANN_CALL_OP_UNARY(Tanh); break; @@ -1817,14 +1748,14 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, case GGML_GLU_OP_SWIGLU: GGML_CANN_CALL_OP_UNARY_GATED(Silu); break; - case GGML_GLU_OP_GEGLU_QUICK: { - auto lambda = [](ggml_backend_cann_context& ctx, - aclTensor* acl_src, - aclTensor* acl_dst) { - GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); - }; - ggml_cann_op_unary_gated(lambda, ctx, dst); - } break; + case GGML_GLU_OP_GEGLU_QUICK: + { + auto lambda = [](ggml_backend_cann_context & ctx, aclTensor * acl_src, aclTensor * acl_dst) { + GGML_CANN_CALL_ACLNN_OP(ctx, GeluV2, acl_src, 0, acl_dst); + }; + ggml_cann_op_unary_gated(lambda, ctx, dst); + } + break; default: return false; } @@ -1956,9 +1887,8 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, * @param backend Pointer to the CANN backend structure. * @return A pointer to a constant string representing the backend name. */ -static const char* ggml_backend_cann_name(ggml_backend_t backend) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; +static const char * ggml_backend_cann_name(ggml_backend_t backend) { + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; return cann_ctx->name.c_str(); } @@ -1972,8 +1902,7 @@ static const char* ggml_backend_cann_name(ggml_backend_t backend) { * @param backend Pointer to the CANN backend structure to be freed. */ static void ggml_backend_cann_free(ggml_backend_t backend) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ACL_CHECK(aclrtSynchronizeDevice()); ACL_CHECK(aclrtResetDevice(cann_ctx->device)); @@ -1981,7 +1910,6 @@ static void ggml_backend_cann_free(ggml_backend_t backend) { delete backend; } - /** * @brief Sets tensor data asynchronously in the CANN backend. * @@ -1994,21 +1922,17 @@ static void ggml_backend_cann_free(ggml_backend_t backend) { * @param size Size of the data to copy in bytes. */ static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend, - ggml_tensor *tensor, - const void *data, - size_t offset, - size_t size) { - ggml_backend_cann_context *cann_ctx = - (ggml_backend_cann_context *)backend->context; - ggml_backend_buffer_t buf = - tensor->view_src ? 
tensor->view_src->buffer : tensor->buffer; - - GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && - "unsupported buffer type"); + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; + ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; + + GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && "unsupported buffer type"); GGML_ASSERT(!ggml_is_quantized(tensor->type)); - ggml_cann_async_memcpy(cann_ctx, (char *)tensor->data + offset, data, size, - ACL_MEMCPY_HOST_TO_DEVICE); + ggml_cann_async_memcpy(cann_ctx, (char *) tensor->data + offset, data, size, ACL_MEMCPY_HOST_TO_DEVICE); } /** @@ -2022,21 +1946,18 @@ static void ggml_backend_cann_set_tensor_async(ggml_backend_t backend, * @param offset Offset in bytes within the host data. * @param size Size of the data to copy in bytes. */ -static void ggml_backend_cann_get_tensor_async( - ggml_backend_t backend, const ggml_tensor *tensor, void *data, - size_t offset, size_t size) { - ggml_backend_cann_context *cann_ctx = - (ggml_backend_cann_context *)backend->context; - ggml_backend_buffer_t buf = - tensor->view_src ? tensor->view_src->buffer : tensor->buffer; +static void ggml_backend_cann_get_tensor_async(ggml_backend_t backend, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; + ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; - GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && - "unsupported buffer type"); + GGML_ASSERT(buf->buft == ggml_backend_cann_buffer_type(cann_ctx->device) && "unsupported buffer type"); GGML_ASSERT(!ggml_is_quantized(tensor->type)); - ggml_cann_async_memcpy(cann_ctx, data, (char *)tensor->data + offset, size, - ACL_MEMCPY_DEVICE_TO_HOST); - + ggml_cann_async_memcpy(cann_ctx, data, (char *) tensor->data + offset, size, ACL_MEMCPY_DEVICE_TO_HOST); } /** @@ -2052,28 +1973,23 @@ static void ggml_backend_cann_get_tensor_async( * @param dst Pointer to the destination tensor to copy data to. * @return true if the copy operation succeeds, false otherwise. */ -static bool ggml_backend_cann_cpy_tensor_async( - ggml_backend_t backend_src, ggml_backend_t backend_dst, - const ggml_tensor* src, ggml_tensor* dst) { - GGML_ASSERT(ggml_backend_is_cann(backend_src) || - ggml_backend_is_cann(backend_dst)); +static bool ggml_backend_cann_cpy_tensor_async(ggml_backend_t backend_src, + ggml_backend_t backend_dst, + const ggml_tensor * src, + ggml_tensor * dst) { + GGML_ASSERT(ggml_backend_is_cann(backend_src) || ggml_backend_is_cann(backend_dst)); - GGML_ASSERT(!is_matmul_weight((const ggml_tensor*)src)); + GGML_ASSERT(!is_matmul_weight((const ggml_tensor *) src)); - if (!ggml_backend_buffer_is_cann(src->buffer) || - !ggml_backend_buffer_is_cann(dst->buffer)) { + if (!ggml_backend_buffer_is_cann(src->buffer) || !ggml_backend_buffer_is_cann(dst->buffer)) { return false; } - ggml_backend_buffer_t buf_src = - src->view_src ? src->view_src->buffer : src->buffer; - ggml_backend_buffer_t buf_dst = - dst->view_src ? dst->view_src->buffer : dst->buffer; + ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer; + ggml_backend_buffer_t buf_dst = dst->view_src ? 
dst->view_src->buffer : dst->buffer; - ggml_backend_cann_context* cann_ctx_src = - (ggml_backend_cann_context*)backend_src->context; - ggml_backend_cann_context* cann_ctx_dst = - (ggml_backend_cann_context*)backend_dst->context; + ggml_backend_cann_context * cann_ctx_src = (ggml_backend_cann_context *) backend_src->context; + ggml_backend_cann_context * cann_ctx_dst = (ggml_backend_cann_context *) backend_dst->context; size_t copy_size = ggml_nbytes(dst); if (copy_size == 0) { @@ -2084,17 +2000,14 @@ static bool ggml_backend_cann_cpy_tensor_async( // TODO: Support 310p P2P copy return false; #endif - ggml_backend_cann_buffer_context* buf_ctx_src = - (ggml_backend_cann_buffer_context*)buf_src->context; - ggml_backend_cann_buffer_context* buf_ctx_dst = - (ggml_backend_cann_buffer_context*)buf_dst->context; + ggml_backend_cann_buffer_context * buf_ctx_src = (ggml_backend_cann_buffer_context *) buf_src->context; + ggml_backend_cann_buffer_context * buf_ctx_dst = (ggml_backend_cann_buffer_context *) buf_dst->context; GGML_ASSERT(cann_ctx_src->device == buf_ctx_src->device); GGML_ASSERT(cann_ctx_dst->device == buf_ctx_dst->device); int32_t canAccessPeer = 0; - ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device, - cann_ctx_dst->device)); + ACL_CHECK(aclrtDeviceCanAccessPeer(&canAccessPeer, cann_ctx_src->device, cann_ctx_dst->device)); if (!canAccessPeer) { return false; } @@ -2106,8 +2019,7 @@ static bool ggml_backend_cann_cpy_tensor_async( // wait for task_queue empty to keep task order. cann_ctx_src->task_queue.wait(); - ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, - ACL_MEMCPY_DEVICE_TO_DEVICE, + ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, cann_ctx_src->stream())); // record event on src stream after the copy // TODO: this event is not effective with acl graph mode, change to use aclrtSynchronizeStream @@ -2122,8 +2034,7 @@ static bool ggml_backend_cann_cpy_tensor_async( ACL_CHECK(aclrtSynchronizeStream(cann_ctx_src->stream())); } else { // src and dst are on the same backend - ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, - ACL_MEMCPY_DEVICE_TO_DEVICE, + ACL_CHECK(aclrtMemcpyAsync(dst->data, copy_size, src->data, copy_size, ACL_MEMCPY_DEVICE_TO_DEVICE, cann_ctx_dst->stream())); } @@ -2139,8 +2050,7 @@ static bool ggml_backend_cann_cpy_tensor_async( * @param backend Pointer to the CANN backend structure to synchronize. */ static void ggml_backend_cann_synchronize(ggml_backend_t backend) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; cann_ctx->task_queue.wait(); ggml_cann_set_device(cann_ctx->device); ACL_CHECK(aclrtSynchronizeStream(cann_ctx->stream())); @@ -2168,16 +2078,14 @@ static void ggml_backend_cann_synchronize(ggml_backend_t backend) { * @param cann_ctx The CANN backend context containing the graph cache. * @param cgraph The current ggml computation graph. */ -static void add_lru_matched_graph_node_properties( - ggml_backend_cann_context * cann_ctx, - ggml_cgraph * cgraph) { +static void add_lru_matched_graph_node_properties(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph) { // Create a new ggml_cann_graph object on the heap (its lifetime is managed by the cache). 
ggml_cann_graph * new_graph = new ggml_cann_graph(); new_graph->ggml_graph_properties.resize(cgraph->n_nodes); for (int node_idx = 0; node_idx < cgraph->n_nodes; ++node_idx) { ggml_tensor * node = cgraph->nodes[node_idx]; - auto & prop = new_graph->ggml_graph_properties[node_idx]; + auto & prop = new_graph->ggml_graph_properties[node_idx]; prop.node_address = node->data; prop.node_op = node->op; @@ -2214,11 +2122,9 @@ static void add_lru_matched_graph_node_properties( * @param graph_node_properties The stored properties of a CANN graph node. * @return true if all fields match (excluding GGML_OP_VIEW); false otherwise. */ -static bool ggml_graph_node_has_matching_properties( - ggml_tensor * node, - ggml_graph_node_properties * graph_node_properties) { - if (node->data != graph_node_properties->node_address && - node->op != GGML_OP_VIEW) { +static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, + ggml_graph_node_properties * graph_node_properties) { + if (node->data != graph_node_properties->node_address && node->op != GGML_OP_VIEW) { return false; } @@ -2237,8 +2143,7 @@ static bool ggml_graph_node_has_matching_properties( for (int i = 0; i < GGML_MAX_SRC; i++) { if (node->src[i]) { - if (node->src[i]->data != graph_node_properties->src_address[i] && - node->op != GGML_OP_VIEW) { + if (node->src[i]->data != graph_node_properties->src_address[i] && node->op != GGML_OP_VIEW) { return false; } @@ -2280,8 +2185,8 @@ static bool ggml_graph_node_has_matching_properties( * @return true if a matching cached graph exists; false otherwise. */ static bool is_matched_graph(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph) { - ggml_cann_graph_lru_cache &lru_cache = cann_ctx->graph_lru_cache; - for (auto &graph_ptr : lru_cache.cache_list) { + ggml_cann_graph_lru_cache & lru_cache = cann_ctx->graph_lru_cache; + for (auto & graph_ptr : lru_cache.cache_list) { // Skip graphs with a different number of nodes. if (graph_ptr->ggml_graph_properties.size() != static_cast(cgraph->n_nodes)) { continue; @@ -2320,21 +2225,24 @@ static bool is_matched_graph(ggml_backend_cann_context * cann_ctx, ggml_cgraph * * @param use_cann_graph Whether to use CANN graph execution. * @param cann_graph_update_required Whether graph capture is needed due to graph changes. */ -static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx, ggml_cgraph * cgraph, - bool & use_cann_graph, bool & cann_graph_update_required) { +static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx, + ggml_cgraph * cgraph, + bool & use_cann_graph, + bool & cann_graph_update_required) { #ifdef USE_ACL_GRAPH - ggml_cann_graph* matched_graph = cann_ctx->graph_lru_cache.cache_list.front(); + ggml_cann_graph * matched_graph = cann_ctx->graph_lru_cache.cache_list.front(); if (use_cann_graph && cann_graph_update_required) { ACL_CHECK(aclmdlRICaptureBegin(cann_ctx->stream(), ACL_MODEL_RI_CAPTURE_MODE_GLOBAL)); } -#endif // USE_ACL_GRAPH +#endif // USE_ACL_GRAPH // Only perform the graph execution if CANN graphs are not enabled, or we are capturing the graph. // With the use of CANN graphs, the execution will be performed by the graph launch. 
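The CANN graph-cache changes above boil down to recording a lightweight per-node fingerprint (data address, op, shapes, strides, source addresses, op params) when a graph is captured, keeping captured graphs in an LRU list, and re-capturing only when no cached fingerprint matches the incoming cgraph. A minimal illustrative sketch of that reuse-or-recapture decision (not from the patch; the names NodeProps, CachedGraph, GraphCache and find_and_promote are invented here):

```cuda
// Illustrative sketch only (names invented): reuse a previously captured graph
// when the incoming graph has the same per-node fingerprint, otherwise signal
// that a fresh capture is required. The real code compares node address, op,
// ne/nb, source addresses and op params per node.
#include <cstdint>
#include <list>
#include <memory>
#include <vector>

struct NodeProps {
    void *  addr;
    int     op;
    int64_t ne[4];

    bool operator==(const NodeProps & other) const {
        return addr == other.addr && op == other.op &&
               ne[0] == other.ne[0] && ne[1] == other.ne[1] &&
               ne[2] == other.ne[2] && ne[3] == other.ne[3];
    }
};

struct CachedGraph {
    std::vector<NodeProps> props;  // fingerprint recorded at capture time
    // ... the captured device graph handle would live here
};

struct GraphCache {
    std::list<std::unique_ptr<CachedGraph>> lru;  // front = most recently used

    // True if a cached graph matches; the hit is promoted to the front (LRU).
    bool find_and_promote(const std::vector<NodeProps> & current) {
        for (auto it = lru.begin(); it != lru.end(); ++it) {
            if ((*it)->props == current) {
                lru.splice(lru.begin(), lru, it);
                return true;   // reuse the captured graph, no re-capture needed
            }
        }
        return false;          // miss: caller captures a new graph and pushes it
    }
};
```

In the patch the comparison is done per ggml node (ggml_graph_node_has_matching_properties) and additionally tolerates address changes for GGML_OP_VIEW nodes.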
if (!use_cann_graph || cann_graph_update_required) { for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; - if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { + if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || + node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; } @@ -2347,7 +2255,7 @@ static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx } #ifdef USE_ACL_GRAPH - if (use_cann_graph && cann_graph_update_required) { // End CANN graph capture + if (use_cann_graph && cann_graph_update_required) { // End CANN graph capture ACL_CHECK(aclmdlRICaptureEnd(cann_ctx->stream(), &matched_graph->graph)); } @@ -2355,10 +2263,9 @@ static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx // Execute graph ACL_CHECK(aclmdlRIExecuteAsync(matched_graph->graph, cann_ctx->stream())); } -#endif // USE_ACL_GRAPH +#endif // USE_ACL_GRAPH } - /** * @brief Computes a computational graph using a CANN backend. * @@ -2371,10 +2278,8 @@ static void evaluate_and_capture_cann_graph(ggml_backend_cann_context * cann_ctx * @return enum ggml_status Returns GGML_STATUS_SUCCESS if computation * completes successfully, otherwise an appropriate error status. */ -static enum ggml_status ggml_backend_cann_graph_compute( - ggml_backend_t backend, ggml_cgraph* cgraph) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; +static enum ggml_status ggml_backend_cann_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; ggml_cann_set_device(cann_ctx->device); g_nz_workspaces[cann_ctx->device].clear(); @@ -2382,7 +2287,7 @@ static enum ggml_status ggml_backend_cann_graph_compute( cann_ctx->rope_cache.cached = false; #ifdef USE_ACL_GRAPH - bool use_cann_graph = true; + bool use_cann_graph = true; bool cann_graph_update_required = false; static bool prefill_use_graph = parse_bool(get_env("GGML_CANN_PREFILL_USE_GRAPH").value_or("")); @@ -2413,15 +2318,10 @@ static enum ggml_status ggml_backend_cann_graph_compute( } } #else - bool use_cann_graph = false; + bool use_cann_graph = false; bool cann_graph_update_required = false; #endif // USE_ACL_GRAPH - evaluate_and_capture_cann_graph( - cann_ctx, - cgraph, - use_cann_graph, - cann_graph_update_required - ); + evaluate_and_capture_cann_graph(cann_ctx, cgraph, use_cann_graph, cann_graph_update_required); return GGML_STATUS_SUCCESS; } @@ -2438,8 +2338,7 @@ static enum ggml_status ggml_backend_cann_graph_compute( * @return bool Returns true if the operation is supported by the backend, * otherwise false. 
*/ -static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, - const ggml_tensor* op) { +static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { switch (op->op) { case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { @@ -2474,24 +2373,24 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return false; } break; - case GGML_OP_MUL_MAT: { - switch (op->src[0]->type) { - case GGML_TYPE_F16: - case GGML_TYPE_F32: - return true; - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q4_0: + case GGML_OP_MUL_MAT: + { + switch (op->src[0]->type) { + case GGML_TYPE_F16: + case GGML_TYPE_F32: + return true; + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q4_0: #ifdef ASCEND_310P - // Q4 && Q8 per group is not support on 310p device - return false; + // Q4 && Q8 per group is not support on 310p device + return false; #endif - // only support contiguous for quantized types. - return ggml_is_contiguous(op->src[0]) && - ggml_is_contiguous(op->src[1]); - default: - return false; + // only support contiguous for quantized types. + return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); + default: + return false; + } } - } case GGML_OP_MUL_MAT_ID: switch (op->src[0]->type) { case GGML_TYPE_F16: @@ -2504,99 +2403,107 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return false; #endif // only support contiguous for quantized types. - return ggml_is_contiguous(op->src[0]) && - ggml_is_contiguous(op->src[1]); + return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); default: return false; } // embedding - case GGML_OP_GET_ROWS: { - switch (op->src[0]->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_Q8_0: - return true; - default: - return false; - } - } break; - case GGML_OP_SET_ROWS: { - switch (op->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - return true; - default: - return false; + case GGML_OP_GET_ROWS: + { + switch (op->src[0]->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_Q8_0: + return true; + default: + return false; + } } - } break; - case GGML_OP_CPY: { - ggml_tensor *src = op->src[0]; - if ((op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_F16) || - (src->type != GGML_TYPE_F32 && - src->type != GGML_TYPE_F16)) { - // only support F32 and F16. - return false; + break; + case GGML_OP_SET_ROWS: + { + switch (op->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + return true; + default: + return false; + } } - return true; - } break; - case GGML_OP_CONT: { - // TODO: support GGML_TYPE_BF16 - switch (op->src[0]->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - return true; - default: + break; + case GGML_OP_CPY: + { + ggml_tensor * src = op->src[0]; + if ((op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_F16) || + (src->type != GGML_TYPE_F32 && src->type != GGML_TYPE_F16)) { + // only support F32 and F16. 
return false; + } + return true; } - } - case GGML_OP_ROPE: { - // TODO: with ops-test v == 1 - // TODO: n_dims <= ne0 - if (op->src[0]->ne[0] != op->op_params[1]) { - return false; + break; + case GGML_OP_CONT: + { + // TODO: support GGML_TYPE_BF16 + switch (op->src[0]->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + return true; + default: + return false; + } } + case GGML_OP_ROPE: + { + // TODO: with ops-test v == 1 + // TODO: n_dims <= ne0 + if (op->src[0]->ne[0] != op->op_params[1]) { + return false; + } - const int mode = ((const int32_t *) op->op_params)[2]; - if (mode & GGML_ROPE_TYPE_MROPE) { - return false; - } - if (mode & GGML_ROPE_TYPE_VISION) { - return false; - } + const int mode = ((const int32_t *) op->op_params)[2]; + if (mode & GGML_ROPE_TYPE_MROPE) { + return false; + } + if (mode & GGML_ROPE_TYPE_VISION) { + return false; + } #ifdef ASCEND_310P - if(!ggml_is_contiguous(op->src[0])){ - return false; - } + if (!ggml_is_contiguous(op->src[0])) { + return false; + } #endif - return true; - } - case GGML_OP_UPSCALE: { - // aclnnUpsampleNearest2dGetWorkspaceSize not support - // selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] not equal - if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) { - return false; + return true; } - if (op->op_params[0] != GGML_SCALE_MODE_NEAREST) { - return false; + case GGML_OP_UPSCALE: + { + // aclnnUpsampleNearest2dGetWorkspaceSize not support + // selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] not equal + if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) { + return false; + } + if (op->op_params[0] != GGML_SCALE_MODE_NEAREST) { + return false; + } + return true; } - return true; - } - case GGML_OP_POOL_2D: { - const int32_t * opts = (const int32_t *) op->op_params; + case GGML_OP_POOL_2D: + { + const int32_t * opts = (const int32_t *) op->op_params; #ifdef ASCEND_310P - enum ggml_op_pool opt = static_cast(opts[0]); - if(opt == GGML_OP_POOL_MAX){ - return false; - } + enum ggml_op_pool opt = static_cast(opts[0]); + if (opt == GGML_OP_POOL_MAX) { + return false; + } #endif - const int k0 = opts[1]; - const int k1 = opts[2]; - const int p0 = opts[5]; - const int p1 = opts[6]; - // value of paddingH should be at most half of kernelH - // value of paddingW should be at most half of kernelW - return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2)); - } + const int k0 = opts[1]; + const int k1 = opts[2]; + const int p0 = opts[5]; + const int p1 = opts[6]; + // value of paddingH should be at most half of kernelH + // value of paddingW should be at most half of kernelW + return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2)); + } case GGML_OP_DUP: case GGML_OP_SUM: case GGML_OP_IM2COL: @@ -2639,48 +2546,50 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return (op->src[0]->ne[0] - 1) <= 255; case GGML_OP_SCALE: float bias; - memcpy(&bias, (const float *)(op->op_params) + 1, sizeof(float)); - return bias == 0.0f; // TODO: support bias != 0.0f + memcpy(&bias, (const float *) (op->op_params) + 1, sizeof(float)); + return bias == 0.0f; // TODO: support bias != 0.0f case GGML_OP_SOFT_MAX: // TODO: support attention sinks [TAG_ATTN_SINKS] if (op->src[2]) { return false; } return true; - case GGML_OP_FLASH_ATTN_EXT:{ + case GGML_OP_FLASH_ATTN_EXT: + { #ifdef ASCEND_310P - // FA not support on 310p device - return false; -#endif - // derived from [ggml-cuda.cu] - if(op->src[1]->type != GGML_TYPE_F16 || op->src[2]->type != GGML_TYPE_F16){ - return false; - } - if(op->src[1]->type != GGML_TYPE_F16 && op->src[1]->type != 
GGML_TYPE_F32 && op->src[1]->type != GGML_TYPE_BF16){ - return false; - } - if(op->type != GGML_TYPE_F16 && op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_BF16){ - return false; - } - // TODO: support attention sinks [TAG_ATTN_SINKS] - if (op->src[4]) { - return false; - } - if (op->src[1]->ne[0] != op->src[2]->ne[0]) { - // different head sizes of K and V are not supported yet - return false; - } - if (op->src[0]->ne[0] % 16 != 0) { - // TODO: padding to support - return false; - } - float logitSoftcap = 0.0f; - memcpy(&logitSoftcap, (const float *)(op->op_params) + 2, sizeof(float)); - if(logitSoftcap != 0.0f) { + // FA not support on 310p device return false; +#endif + // derived from [ggml-cuda.cu] + if (op->src[1]->type != GGML_TYPE_F16 || op->src[2]->type != GGML_TYPE_F16) { + return false; + } + if (op->src[1]->type != GGML_TYPE_F16 && op->src[1]->type != GGML_TYPE_F32 && + op->src[1]->type != GGML_TYPE_BF16) { + return false; + } + if (op->type != GGML_TYPE_F16 && op->type != GGML_TYPE_F32 && op->type != GGML_TYPE_BF16) { + return false; + } + // TODO: support attention sinks [TAG_ATTN_SINKS] + if (op->src[4]) { + return false; + } + if (op->src[1]->ne[0] != op->src[2]->ne[0]) { + // different head sizes of K and V are not supported yet + return false; + } + if (op->src[0]->ne[0] % 16 != 0) { + // TODO: padding to support + return false; + } + float logitSoftcap = 0.0f; + memcpy(&logitSoftcap, (const float *) (op->op_params) + 2, sizeof(float)); + if (logitSoftcap != 0.0f) { + return false; + } + return true; } - return true; - } default: return false; } @@ -2717,8 +2626,7 @@ static bool ggml_backend_buft_is_cann(ggml_backend_buffer_type_t buft) { * @return bool Returns true if the operation should be offloaded, otherwise * false. */ -static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev, - const ggml_tensor* op) { +static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { const int min_batch_size = 32; GGML_UNUSED(dev); @@ -2734,9 +2642,8 @@ static bool ggml_backend_cann_offload_op(ggml_backend_dev_t dev, * @param event Pointer to the event structure to be recorded. */ static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_event_t event) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; - ACL_CHECK(aclrtRecordEvent((aclrtEvent)event->context, cann_ctx->stream())); + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; + ACL_CHECK(aclrtRecordEvent((aclrtEvent) event->context, cann_ctx->stream())); } /** @@ -2749,13 +2656,10 @@ static void ggml_backend_cann_event_record(ggml_backend_t backend, ggml_backend_ * @param event Pointer to the event structure that the backend needs to wait * for. */ -static void ggml_backend_cann_event_wait(ggml_backend_t backend, - ggml_backend_event_t event) { - ggml_backend_cann_context* cann_ctx = - (ggml_backend_cann_context*)backend->context; +static void ggml_backend_cann_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { + ggml_backend_cann_context * cann_ctx = (ggml_backend_cann_context *) backend->context; if (ggml_backend_is_cann(backend)) { - ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(), - (aclrtEvent)event->context)); + ACL_CHECK(aclrtStreamWaitEvent(cann_ctx->stream(), (aclrtEvent) event->context)); } else { GGML_ABORT("fatal error"); } @@ -2794,30 +2698,30 @@ static const ggml_backend_i ggml_backend_cann_interface = { * @return A pointer to the static GUID. 
*/ static ggml_guid_t ggml_backend_cann_guid() { - static ggml_guid guid = {0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34, - 0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64}; + static ggml_guid guid = { 0xa1, 0x94, 0xaf, 0xac, 0xbd, 0x4f, 0x47, 0x34, + 0xbe, 0x1a, 0x9e, 0x71, 0x1f, 0x9e, 0xed, 0x64 }; return &guid; } // backend device struct ggml_backend_cann_device_context { - int device; + int device; std::string name; std::string description; }; static const char * ggml_backend_cann_device_get_name(ggml_backend_dev_t dev) { - ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context; + ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ctx->name.c_str(); } -static const char* ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) { - ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context; +static const char * ggml_backend_cann_device_get_description(ggml_backend_dev_t dev) { + ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ctx->description.c_str(); } static void ggml_backend_cann_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { - ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context; + ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; ggml_backend_cann_get_device_memory(ctx->device, free, total); } @@ -2844,7 +2748,7 @@ static void ggml_backend_cann_device_get_props(ggml_backend_dev_t dev, ggml_back static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, const char * params) { GGML_UNUSED(params); - ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context; + ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ggml_backend_cann_init(ctx->device); } @@ -2861,19 +2765,17 @@ static ggml_backend_t ggml_backend_cann_device_init(ggml_backend_dev_t dev, cons * @return bool Returns true if the CANN backend supports the buffer type, * otherwise false. */ -static bool ggml_backend_cann_supports_buft( - ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { +static bool ggml_backend_cann_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { if (ggml_backend_buft_is_cann(buft)) { - ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context; - ggml_backend_cann_buffer_type_context * buft_ctx = - (ggml_backend_cann_buffer_type_context *)buft->context; + ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *) dev->context; + ggml_backend_cann_buffer_type_context * buft_ctx = (ggml_backend_cann_buffer_type_context *) buft->context; return buft_ctx->device == dev_ctx->device; } return false; } static ggml_backend_buffer_type_t ggml_backend_cann_device_get_buffer_type(ggml_backend_dev_t dev) { - ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *)dev->context; + ggml_backend_cann_device_context * ctx = (ggml_backend_cann_device_context *) dev->context; return ggml_backend_cann_buffer_type(ctx->device); } @@ -2892,9 +2794,8 @@ static ggml_backend_buffer_type_t ggml_backend_cann_device_get_host_buffer_type( * @param backend Pointer to the CANN backend. * @return ggml_backend_event_t Returns a pointer to the new event structure. 
*/ -static ggml_backend_event_t ggml_backend_cann_device_event_new( - ggml_backend_dev_t dev) { - ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *)dev->context; +static ggml_backend_event_t ggml_backend_cann_device_event_new(ggml_backend_dev_t dev) { + ggml_backend_cann_device_context * dev_ctx = (ggml_backend_cann_device_context *) dev->context; ggml_cann_set_device(dev_ctx->device); @@ -2916,7 +2817,7 @@ static ggml_backend_event_t ggml_backend_cann_device_event_new( * @param event Pointer to the event structure to be freed. */ static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) { - ACL_CHECK(aclrtDestroyEvent((aclrtEvent)event->context)); + ACL_CHECK(aclrtDestroyEvent((aclrtEvent) event->context)); delete event; GGML_UNUSED(dev); @@ -2930,7 +2831,7 @@ static void ggml_backend_cann_device_event_free(ggml_backend_dev_t dev, ggml_bac * @param event Pointer to the event structure to be synchronized. */ static void ggml_backend_cann_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) { - ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent)event->context)); + ACL_CHECK(aclrtSynchronizeEvent((aclrtEvent) event->context)); GGML_UNUSED(dev); } @@ -2941,10 +2842,10 @@ static const ggml_backend_device_i ggml_backend_cann_device_interface = { /* .get_memory = */ ggml_backend_cann_device_get_memory, /* .get_type = */ ggml_backend_cann_device_get_type, /* .get_props = */ ggml_backend_cann_device_get_props, - /* .init_backend = */ ggml_backend_cann_device_init, // called for every card + /* .init_backend = */ ggml_backend_cann_device_init, // called for every card /* .get_buffer_type = */ ggml_backend_cann_device_get_buffer_type, /* .get_host_buffer_type = */ ggml_backend_cann_device_get_host_buffer_type, - /* .buffer_from_host_ptr = */ NULL, // not supported for CANN + /* .buffer_from_host_ptr = */ NULL, // not supported for CANN /* .supports_op = */ ggml_backend_cann_supports_op, /* .supports_buft = */ ggml_backend_cann_supports_buft, /* .offload_op = */ ggml_backend_cann_offload_op, @@ -2953,7 +2854,6 @@ static const ggml_backend_device_i ggml_backend_cann_device_interface = { /* .event_synchronize = */ ggml_backend_cann_device_event_synchronize, }; - // backend reg struct ggml_backend_cann_reg_context { std::vector devices; @@ -2965,12 +2865,12 @@ static const char * ggml_backend_cann_reg_get_name(ggml_backend_reg_t reg) { } static size_t ggml_backend_cann_reg_get_device_count(ggml_backend_reg_t reg) { - ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context; + ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *) reg->context; return ctx->devices.size(); } static ggml_backend_dev_t ggml_backend_cann_reg_get_device(ggml_backend_reg_t reg, size_t index) { - ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *)reg->context; + ggml_backend_cann_reg_context * ctx = (ggml_backend_cann_reg_context *) reg->context; GGML_ASSERT(index < ctx->devices.size()); return ctx->devices[index]; } @@ -2992,34 +2892,30 @@ static const ggml_backend_reg_i ggml_backend_cann_reg_interface = { // backend registry, called only once for cann backend ggml_backend_reg_t ggml_backend_cann_reg() { static ggml_backend_reg reg; - static bool initialized = false; + static bool initialized = false; { - static std::mutex mutex; + static std::mutex mutex; std::lock_guard lock(mutex); if (!initialized) { aclInit(nullptr); ggml_backend_cann_reg_context * ctx = new 
ggml_backend_cann_reg_context; for (int i = 0; i < ggml_cann_info().device_count; i++) { - ggml_backend_cann_device_context* dev_ctx = new ggml_backend_cann_device_context(); - dev_ctx->description = aclrtGetSocName(); - dev_ctx->device = i; - dev_ctx->name = GGML_CANN_NAME + std::to_string(i); + ggml_backend_cann_device_context * dev_ctx = new ggml_backend_cann_device_context(); + dev_ctx->description = aclrtGetSocName(); + dev_ctx->device = i; + dev_ctx->name = GGML_CANN_NAME + std::to_string(i); ggml_cann_set_device(i); - ggml_backend_dev_t dev = new ggml_backend_device { - /* .iface = */ ggml_backend_cann_device_interface, - /* .reg = */ ®, - /* .context = */ dev_ctx - }; + ggml_backend_dev_t dev = new ggml_backend_device{ /* .iface = */ ggml_backend_cann_device_interface, + /* .reg = */ ®, + /* .context = */ dev_ctx }; ctx->devices.push_back(dev); } - reg = ggml_backend_reg { - /* .api_version = */ GGML_BACKEND_API_VERSION, - /* .iface = */ ggml_backend_cann_reg_interface, - /* .context = */ ctx - }; + reg = ggml_backend_reg{ /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_cann_reg_interface, + /* .context = */ ctx }; } initialized = true; @@ -3035,39 +2931,36 @@ ggml_backend_t ggml_backend_cann_init(int32_t device) { return nullptr; } - ggml_backend_cann_context* ctx = new ggml_backend_cann_context(device); + ggml_backend_cann_context * ctx = new ggml_backend_cann_context(device); if (ctx == nullptr) { GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__); return nullptr; } ggml_cann_set_device(ctx->device); ggml_backend_t cann_backend = - new ggml_backend{/* .guid = */ ggml_backend_cann_guid(), - /* .interface = */ ggml_backend_cann_interface, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device), - /* .context = */ ctx}; + new ggml_backend{ /* .guid = */ ggml_backend_cann_guid(), + /* .interface = */ ggml_backend_cann_interface, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device), + /* .context = */ ctx }; return cann_backend; } bool ggml_backend_is_cann(ggml_backend_t backend) { - return backend != NULL && - ggml_guid_matches(backend->guid, ggml_backend_cann_guid()); + return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_cann_guid()); } int32_t ggml_backend_cann_get_device_count() { return ggml_cann_info().device_count; } -void ggml_backend_cann_get_device_description( - int32_t device, char* description, size_t description_size) { +void ggml_backend_cann_get_device_description(int32_t device, char * description, size_t description_size) { ggml_cann_set_device(device); - const char* soc_name = aclrtGetSocName(); + const char * soc_name = aclrtGetSocName(); snprintf(description, description_size, "%s", soc_name); } -void ggml_backend_cann_get_device_memory(int32_t device, size_t* free, - size_t* total) { +void ggml_backend_cann_get_device_memory(int32_t device, size_t * free, size_t * total) { ggml_cann_set_device(device); ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total)); } diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 42041b717aa22..34323afa0762a 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -466,29 +466,45 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS "-march=${MARCH_STR}" -mabi=lp64d) elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") - list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/s390/quants.c) - file(READ "/proc/cpuinfo" 
CPUINFO_CONTENTS) - string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) - - # TODO: Separation to determine activation of VX/VXE/VXE2 - if (${S390X_M} MATCHES "8561|8562") - message(STATUS "z15 target") - list(APPEND ARCH_FLAGS -march=z15) - elseif (${S390X_M} MATCHES "3931") - message(STATUS "z16 target") - list(APPEND ARCH_FLAGS -march=z16) - elseif (${S390X_M} MATCHES "9175|9176") - # NOTE: Only available from GCC 15.1.0 onwards. Any z17 machine with compile issues must first verify their GCC version. - # binutils must also be updated to the latest for the -march=z17 flag to work. Otherwise, use -march=arch15. - message(STATUS "z17 target") - list(APPEND ARCH_FLAGS -march=arch15) - else() - message(STATUS "Unknown target") - message(WARNING "Unknown target. If you are compiling for z14 and earlier, you might have to add -DGGML_VXE=OFF.") - list(APPEND ARCH_FLAGS -march=native -mtune=native) + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/s390/quants.c) + + # for native compilation + if (GGML_NATIVE) + # check machine level to determine target + file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) + string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) + + # TODO: Separation to determine activation of VX/VXE/VXE2 + if (${S390X_M} MATCHES "8561|8562") + message(STATUS "z15 target") + list(APPEND ARCH_FLAGS -march=z15) + elseif (${S390X_M} MATCHES "3931") + message(STATUS "z16 target") + list(APPEND ARCH_FLAGS -march=z16) + elseif (${S390X_M} MATCHES "9175|9176") + # NOTE: Only available from GCC 15.1.0 onwards. Any z17 machine with compile issues must first verify their GCC version. + # binutils must also be updated to the latest for the -march=z17 flag to work. Otherwise, use -march=arch15. + message(STATUS "z17 target") + list(APPEND ARCH_FLAGS -march=arch15) + else() + message(STATUS "Unknown target") + message(WARNING "Unknown target. 
If you are compiling for z14 and earlier, you might have to add -DGGML_VXE=OFF.") + list(APPEND ARCH_FLAGS -march=native -mtune=native) + endif() + # for cross-compilation + elseif(GGML_CPU_ALL_VARIANTS) + # range through IBM z15 to z17 + # NOTE: update when a new hardware level is released + foreach (ZHW RANGE 15 17) + if(DEFINED GGML_INTERNAL_Z${ZHW}) + message(STATUS "z${ZHW} cross-compile target") + list(APPEND ARCH_FLAGS -march=z${ZHW}) + endif() + endforeach() endif() - if (GGML_VXE) + if (GGML_VXE OR GGML_INTERNAL_VXE) message(STATUS "VX/VXE/VXE2 enabled") list(APPEND ARCH_FLAGS -mvx -mzvector) list(APPEND ARCH_DEFINITIONS GGML_VXE) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 29c870600ba93..9ec485cfa2ff7 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -3567,13 +3567,17 @@ void ggml_cpu_init(void) { #ifdef GGML_USE_OPENMP //if (!getenv("OMP_WAIT_POLICY")) { // // set the wait policy to active, so that OpenMP threads don't sleep - // putenv("OMP_WAIT_POLICY=active"); + // setenv("OMP_WAIT_POLICY", "active", 0) //} if (!getenv("KMP_BLOCKTIME")) { // set the time to wait before sleeping a thread // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases - putenv("KMP_BLOCKTIME=200"); // 200ms +#ifdef _WIN32 + _putenv_s("KMP_BLOCKTIME", "200"); // 200ms +#else + setenv("KMP_BLOCKTIME", "200", 0); // 200ms +#endif } #endif } diff --git a/ggml/src/ggml-cpu/spacemit/ime.cpp b/ggml/src/ggml-cpu/spacemit/ime.cpp index 54d3dece0e03a..91fe1925eaa3a 100644 --- a/ggml/src/ggml-cpu/spacemit/ime.cpp +++ b/ggml/src/ggml-cpu/spacemit/ime.cpp @@ -485,8 +485,9 @@ template class tensor_ int32_t start = ith * task_per_thread; int32_t end = std::min((ith + 1) * task_per_thread, task_count); for (int32_t compute_idx = start; compute_idx < end; compute_idx++) { - int32_t gemm_idx = compute_idx / block_size_m; - int32_t m_idx = compute_idx % block_size_m * block_size_m; + int32_t gemm_idx = compute_idx / per_gemm_block_count_m; + int32_t block_idx_in_gemm = compute_idx % per_gemm_block_count_m; + int32_t m_idx = block_idx_in_gemm * block_size_m; const qnbitgemm_spacemit_ime_args & data = qnbitgemm_args[gemm_idx]; int32_t rows_tobe_handled = (gemm_m - m_idx) > block_size_m ? 
block_size_m : (gemm_m - m_idx); diff --git a/ggml/src/ggml-cuda/argsort.cu b/ggml/src/ggml-cuda/argsort.cu index 607ded8558b45..6e7b90d42783f 100644 --- a/ggml/src/ggml-cuda/argsort.cu +++ b/ggml/src/ggml-cuda/argsort.cu @@ -1,5 +1,81 @@ #include "argsort.cuh" +#ifdef GGML_CUDA_USE_CUB +# include +using namespace cub; +#endif // GGML_CUDA_USE_CUB + +static __global__ void init_indices(int * indices, const int ncols, const int nrows) { + const int col = blockIdx.x * blockDim.x + threadIdx.x; + const int row = blockIdx.y; + + if (col < ncols && row < nrows) { + indices[row * ncols + col] = col; + } +} + +static __global__ void init_offsets(int * offsets, const int ncols, const int nrows) { + const int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx <= nrows) { + offsets[idx] = idx * ncols; + } +} + +#ifdef GGML_CUDA_USE_CUB +static void argsort_f32_i32_cuda_cub(ggml_cuda_pool & pool, + const float * x, + int * dst, + const int ncols, + const int nrows, + ggml_sort_order order, + cudaStream_t stream) { + ggml_cuda_pool_alloc temp_indices_alloc(pool, ncols * nrows); + ggml_cuda_pool_alloc temp_keys_alloc(pool, ncols * nrows); + ggml_cuda_pool_alloc offsets_alloc(pool, nrows + 1); + + int * temp_indices = temp_indices_alloc.get(); + float * temp_keys = temp_keys_alloc.get(); + int * d_offsets = offsets_alloc.get(); + + static const int block_size = 256; + const dim3 grid_size((ncols + block_size - 1) / block_size, nrows); + init_indices<<>>(temp_indices, ncols, nrows); + + const dim3 offset_grid((nrows + block_size - 1) / block_size); + init_offsets<<>>(d_offsets, ncols, nrows); + + cudaMemcpyAsync(temp_keys, x, ncols * nrows * sizeof(float), cudaMemcpyDeviceToDevice, stream); + + size_t temp_storage_bytes = 0; + + if (order == GGML_SORT_ORDER_ASC) { + DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, temp_keys, temp_keys, // keys (in-place) + temp_indices, dst, // values (indices) + ncols * nrows, nrows, // num items, num segments + d_offsets, d_offsets + 1, 0, sizeof(float) * 8, // all bits + stream); + } else { + DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, temp_keys, temp_keys, temp_indices, + dst, ncols * nrows, nrows, d_offsets, d_offsets + 1, 0, + sizeof(float) * 8, stream); + } + + ggml_cuda_pool_alloc temp_storage_alloc(pool, temp_storage_bytes); + void * d_temp_storage = temp_storage_alloc.get(); + + if (order == GGML_SORT_ORDER_ASC) { + DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, temp_keys, temp_keys, temp_indices, dst, + ncols * nrows, nrows, d_offsets, d_offsets + 1, 0, sizeof(float) * 8, + stream); + } else { + DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, temp_keys, temp_keys, + temp_indices, dst, ncols * nrows, nrows, d_offsets, d_offsets + 1, + 0, sizeof(float) * 8, stream); + } +} +#endif // GGML_CUDA_USE_CUB + +// Bitonic sort implementation template static inline __device__ void ggml_cuda_swap(T & a, T & b) { T tmp = a; @@ -65,7 +141,12 @@ static int next_power_of_2(int x) { return n; } -static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) { +static void argsort_f32_i32_cuda_bitonic(const float * x, + int * dst, + const int ncols, + const int nrows, + ggml_sort_order order, + cudaStream_t stream) { // bitonic sort requires ncols to be power of 2 const int ncols_pad = next_power_of_2(ncols); @@ -77,9 +158,11 @@ static void argsort_f32_i32_cuda(const float * x, int * dst, const 
int ncols, co GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb); if (order == GGML_SORT_ORDER_ASC) { - k_argsort_f32_i32<<>>(x, dst, ncols, ncols_pad); + k_argsort_f32_i32 + <<>>(x, dst, ncols, ncols_pad); } else if (order == GGML_SORT_ORDER_DESC) { - k_argsort_f32_i32<<>>(x, dst, ncols, ncols_pad); + k_argsort_f32_i32 + <<>>(x, dst, ncols, ncols_pad); } else { GGML_ABORT("fatal error"); } @@ -100,5 +183,18 @@ void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0]; - argsort_f32_i32_cuda(src0_d, (int *)dst_d, ncols, nrows, order, stream); +#ifdef GGML_CUDA_USE_CUB + const int ncols_pad = next_power_of_2(ncols); + const size_t shared_mem = ncols_pad * sizeof(int); + const size_t max_shared_mem = ggml_cuda_info().devices[ggml_cuda_get_device()].smpb; + + if (shared_mem > max_shared_mem || ncols > 1024) { + ggml_cuda_pool & pool = ctx.pool(); + argsort_f32_i32_cuda_cub(pool, src0_d, (int *) dst_d, ncols, nrows, order, stream); + } else { + argsort_f32_i32_cuda_bitonic(src0_d, (int *) dst_d, ncols, nrows, order, stream); + } +#else + argsort_f32_i32_cuda_bitonic(src0_d, (int *) dst_d, ncols, nrows, order, stream); +#endif } diff --git a/ggml/src/ggml-cuda/binbcast.cu b/ggml/src/ggml-cuda/binbcast.cu index 60240102741f3..0e6d777b1e64a 100644 --- a/ggml/src/ggml-cuda/binbcast.cu +++ b/ggml/src/ggml-cuda/binbcast.cu @@ -272,7 +272,7 @@ static void launch_bin_bcast_pack(const ggml_tensor * src0, const ggml_tensor * const uint3 ne12 = init_fastdiv_values((uint32_t) cne1[2]); const uint3 ne13 = init_fastdiv_values((uint32_t) cne1[3]); - if (block_nums.z > 65535) { + if (block_nums.z > 65535 || block_nums.y > 65535) { int block_num = (ne0 * ne1 * ne2 * ne3 + block_size - 1) / block_size; const uint3 prod_012 = init_fastdiv_values((uint32_t) (ne0 * ne1 * ne2)); const uint3 prod_01 = init_fastdiv_values((uint32_t) (ne0 * ne1)); diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index bc0c2523cc82f..218ccff14e7cc 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -895,6 +895,7 @@ void launch_fattn( const dim3 block_dim(warp_size, nwarps, 1); int max_blocks_per_sm = 1; // Max. number of active blocks limited by occupancy. 
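The CUB-based argsort path added above follows the standard two-phase cub::DeviceSegmentedRadixSort pattern: a first SortPairs call with a null workspace pointer only reports the required temp-storage size, and a second call with the allocated workspace performs the per-row (segmented) sort, permuting the int indices together with the float keys. A standalone host-side sketch of that pattern, assuming separate key in/out buffers and plain cudaMalloc in place of the pool allocator used in the patch:

```cuda
// Illustrative sketch only: two-phase CUB segmented radix sort for a row-wise
// argsort. d_vals_in must be pre-filled with 0..ncols-1 per row and d_offsets
// holds nrows + 1 row boundaries (row i covers [d_offsets[i], d_offsets[i+1])).
#include <cub/cub.cuh>
#include <cuda_runtime.h>

void segmented_argsort_asc(const float * d_keys_in, float * d_keys_out,
                           const int * d_vals_in, int * d_vals_out,
                           const int * d_offsets,
                           int ncols, int nrows, cudaStream_t stream) {
    const int num_items = ncols * nrows;

    // Phase 1: a null workspace pointer makes CUB report the temp-storage size.
    size_t temp_bytes = 0;
    cub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_bytes,
                                             d_keys_in, d_keys_out,
                                             d_vals_in, d_vals_out,
                                             num_items, nrows,
                                             d_offsets, d_offsets + 1,
                                             0, sizeof(float) * 8, stream);

    void * d_temp = nullptr;
    cudaMalloc(&d_temp, temp_bytes);

    // Phase 2: the actual sort; the index values are permuted with the keys,
    // so d_vals_out ends up holding the argsort of each row.
    cub::DeviceSegmentedRadixSort::SortPairs(d_temp, temp_bytes,
                                             d_keys_in, d_keys_out,
                                             d_vals_in, d_vals_out,
                                             num_items, nrows,
                                             d_offsets, d_offsets + 1,
                                             0, sizeof(float) * 8, stream);

    cudaFree(d_temp);
}
```

The dispatch added in ggml-cuda.cu keeps the bitonic kernel for rows that fit in shared memory and falls back to this segmented radix sort otherwise.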
CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm, fattn_kernel, block_dim.x * block_dim.y * block_dim.z, nbytes_shared)); + GGML_ASSERT(max_blocks_per_sm > 0); int parallel_blocks = max_blocks_per_sm; dim3 blocks_num; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 75fd6db14c514..bc396b521af07 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2818,18 +2818,15 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, #endif //TODO: remove special case once ggml_can_fuse can handle empty nodes - std::initializer_list topk_moe_ops = ggml_cuda_topk_moe_ops(false); - std::initializer_list topk_moe_ops_with_norm = ggml_cuda_topk_moe_ops(true); - - if (ops.size() == topk_moe_ops_with_norm.size() && std::equal(ops.begin(), ops.end(), topk_moe_ops_with_norm.begin())) { - - if (node_idx + topk_moe_ops_with_norm.size() > (size_t)cgraph->n_nodes) { - return false; - } - - for (size_t i = 0; i < topk_moe_ops_with_norm.size(); i++) { - if (cgraph->nodes[node_idx + i]->op != topk_moe_ops_with_norm.begin()[i]) return false; - } + std::initializer_list topk_moe_ops = + ggml_cuda_topk_moe_ops(/*with_norm*/ false, /*delayed_softmax=*/false); + std::initializer_list topk_moe_ops_with_norm = + ggml_cuda_topk_moe_ops(/*with_norm=*/true, /*delayed_softmax=*/false); + std::initializer_list topk_moe_ops_delayed_softmax = + ggml_cuda_topk_moe_ops(/*with_norm=*/false, /*delayed_softmax=*/true); + + if (ops.size() == topk_moe_ops_with_norm.size() && + ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 8 })) { ggml_tensor * softmax = cgraph->nodes[node_idx]; ggml_tensor * weights = cgraph->nodes[node_idx+8]; @@ -2838,18 +2835,20 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, } } - if (ops.size() == topk_moe_ops.size() && std::equal(ops.begin(), ops.end(), topk_moe_ops.begin())) { - - if (node_idx + topk_moe_ops.size() > (size_t)cgraph->n_nodes) { - return false; + if (ops.size() == topk_moe_ops.size() && + ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 3, node_idx + 4 })) { + ggml_tensor * softmax = cgraph->nodes[node_idx]; + ggml_tensor * weights = cgraph->nodes[node_idx+4]; + if (ggml_cuda_should_use_topk_moe(softmax, weights)) { + return true; } + } - for (size_t i = 0; i < topk_moe_ops.size(); i++) { - if (cgraph->nodes[node_idx + i]->op != topk_moe_ops.begin()[i]) return false; - } + if (ops.size() == topk_moe_ops_delayed_softmax.size() && + ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 2, node_idx + 5 })) { + ggml_tensor * softmax = cgraph->nodes[node_idx + 4]; + ggml_tensor * weights = cgraph->nodes[node_idx + 5]; - ggml_tensor * softmax = cgraph->nodes[node_idx]; - ggml_tensor * weights = cgraph->nodes[node_idx+4]; if (ggml_cuda_should_use_topk_moe(softmax, weights)) { return true; } @@ -2948,7 +2947,8 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx if (ggml_cuda_can_fuse(cgraph, i, ggml_cuda_topk_moe_ops(/*with norm*/ true), {})) { ggml_tensor * weights = cgraph->nodes[i+8]; ggml_tensor * selected_experts = cgraph->nodes[i+3]; - ggml_cuda_op_topk_moe(*cuda_ctx, node, weights, selected_experts, /*with norm*/ true); + ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, selected_experts, /*with norm*/ true, + /*delayed softmax*/ false); i += 8; continue; } @@ -2956,11 +2956,23 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx if 
(ggml_cuda_can_fuse(cgraph, i, ggml_cuda_topk_moe_ops(/*with norm*/ false), {})) { ggml_tensor * weights = cgraph->nodes[i+4]; ggml_tensor * selected_experts = cgraph->nodes[i+3]; - ggml_cuda_op_topk_moe(*cuda_ctx, node, weights, selected_experts, /*with norm*/ false); + ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, selected_experts, /*with norm*/ false, + /*delayed softmax*/ false); i += 4; continue; } + if (ggml_cuda_can_fuse(cgraph, i, + ggml_cuda_topk_moe_ops(/*with norm*/ false, /*delayed softmax*/ true), {})) { + ggml_tensor * weights = cgraph->nodes[i + 5]; + ggml_tensor * ids = cgraph->nodes[i + 1]; + + ggml_cuda_op_topk_moe(*cuda_ctx, node->src[0], weights, ids, /*with norm*/ false, + /*delayed_softmax*/ true); + i += 5; + continue; + } + if (node->op == GGML_OP_ADD) { int n_fuse = 0; ggml_op ops[8]; @@ -3630,8 +3642,11 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_SUM: return ggml_is_contiguous_rows(op->src[0]); case GGML_OP_ARGSORT: - // TODO: Support arbitrary column width +#ifndef GGML_CUDA_USE_CUB return op->src[0]->ne[0] <= 1024; +#else + return true; +#endif case GGML_OP_SUM_ROWS: case GGML_OP_MEAN: case GGML_OP_GROUP_NORM: diff --git a/ggml/src/ggml-cuda/topk-moe.cu b/ggml/src/ggml-cuda/topk-moe.cu index afe4aee2403b2..e28c810ac5df7 100644 --- a/ggml/src/ggml-cuda/topk-moe.cu +++ b/ggml/src/ggml-cuda/topk-moe.cu @@ -4,16 +4,61 @@ #include +// Warp-local softmax used for both the pre-top-k logits and the post-top-k delayed path. +template +__device__ void softmax_warp_inplace(float (&vals)[experts_per_thread], const int limit, const int lane) { + float max_val = -INFINITY; + +#pragma unroll + for (int i = 0; i < experts_per_thread; i++) { + const int idx = lane + i * WARP_SIZE; + const bool active = !use_limit || (idx < limit); + if (active) { + max_val = max(max_val, vals[i]); + } + } + + max_val = warp_reduce_max(max_val); + + float sum = 0.f; + +#pragma unroll + for (int i = 0; i < experts_per_thread; i++) { + const int idx = lane + i * WARP_SIZE; + const bool active = !use_limit || (idx < limit); + if (active) { + const float val = expf(vals[i] - max_val); + vals[i] = val; + sum += val; + } else { + vals[i] = 0.f; + } + } + + sum = warp_reduce_sum(sum); + + const float inv_sum = 1.0f / sum; + +#pragma unroll + for (int i = 0; i < experts_per_thread; i++) { + const int idx = lane + i * WARP_SIZE; + const bool active = !use_limit || (idx < limit); + if (active) { + vals[i] *= inv_sum; + } + } +} + /* This kernel does the following: - 1. softmax over the logits per token [n_experts, n_tokens] + 1. optionally softmax over the logits per token [n_experts, n_tokens] 2. argmax reduce over the top-k (n_experts_used) logits 3. write weights + ids to global memory - 4. optionally normalize the weights + 4. optionally normalize the weights or apply softmax over the selected logits It is intended as fusion of softmax->top-k->get_rows pipeline for MoE models */ -template +template __launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * logits, float * weights, int32_t * ids, @@ -30,52 +75,31 @@ __launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * constexpr int experts_per_thread = (n_experts > WARP_SIZE) ? 
n_experts / WARP_SIZE : 1; - float logits_r[experts_per_thread]; + float wt[experts_per_thread]; #pragma unroll for (int i = 0; i < n_experts; i += WARP_SIZE) { - const int expert = i + threadIdx.x; - logits_r[i / WARP_SIZE] = n_experts % WARP_SIZE == 0 || expert < n_experts ? logits[expert] : -INFINITY; + const int expert = i + threadIdx.x; + wt[i / WARP_SIZE] = (n_experts % WARP_SIZE == 0 || expert < n_experts) ? logits[expert] : -INFINITY; } - float max_val = logits_r[0]; - -#pragma unroll - for (int i = 1; i < experts_per_thread; i++) { - const float val = logits_r[i]; - max_val = max(val, max_val); + if constexpr (!delayed_softmax) { + softmax_warp_inplace(wt, n_experts, threadIdx.x); } - max_val = warp_reduce_max(max_val); - - float wt[experts_per_thread]; - float tmp = 0.f; - -#pragma unroll - for (int i = 0; i < experts_per_thread; i++) { - const float val = logits_r[i]; - wt[i] = expf(val - max_val); - tmp += wt[i]; - } + //at this point, each thread holds either a portion of the softmax distribution + //or the raw logits. We do the argmax reduce over n_expert_used, each time marking + //the expert weight as -inf to exclude from the next iteration - tmp = warp_reduce_sum(tmp); + float wt_sum = 0.f; - const float inv_sum = 1.0f / tmp; + float output_weights[experts_per_thread]; #pragma unroll for (int i = 0; i < experts_per_thread; i++) { - wt[i] = wt[i] * inv_sum; + output_weights[i] = 0.f; } - //at this point, each thread holds a portion of softmax, - //we do the argmax reduce over n_expert_used, each time marking - //the expert weight as -inf to exclude from the next iteration - - float wt_sum = 0.f; - - extern __shared__ float data_topk_shared[]; - float * wt_shared_ptr = data_topk_shared + threadIdx.y * n_expert_used; - for (int k = 0; k < n_expert_used; k++) { float max_val = wt[0]; int max_expert = threadIdx.x; @@ -99,11 +123,14 @@ __launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * } } + if ((k & (WARP_SIZE - 1)) == threadIdx.x) { + output_weights[k / WARP_SIZE] = max_val; + } + if ((max_expert & (WARP_SIZE - 1)) == threadIdx.x) { wt[max_expert / WARP_SIZE] = -INFINITY; - wt_shared_ptr[k] = max_val; - ids[k] = max_expert; + ids[k] = max_expert; if constexpr (with_norm) { wt_sum += max_val; } @@ -114,17 +141,25 @@ __launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * wt_sum = warp_reduce_sum(wt_sum); const float inv_sum = 1.0f / wt_sum; - for (int i = threadIdx.x; i < n_expert_used; i += WARP_SIZE) { - wt_shared_ptr[i] = wt_shared_ptr[i] * inv_sum; + for (int i = 0; i < experts_per_thread; i++) { + output_weights[i] *= inv_sum; } } - for (int i = threadIdx.x; i < n_expert_used; i += WARP_SIZE) { - weights[i] = wt_shared_ptr[i]; + if constexpr (delayed_softmax) { + softmax_warp_inplace(output_weights, n_expert_used, threadIdx.x); + } + +#pragma unroll + for (int i = 0; i < experts_per_thread; i++) { + const int idx = i * WARP_SIZE + threadIdx.x; + if (idx < n_expert_used) { + weights[idx] = output_weights[i]; + } } } -template +template static void launch_topk_moe_cuda(ggml_backend_cuda_context & ctx, const float * logits, float * weights, @@ -132,53 +167,53 @@ static void launch_topk_moe_cuda(ggml_backend_cuda_context & ctx, const int n_rows, const int n_expert, const int n_expert_used) { + static_assert(!(with_norm && delayed_softmax), "delayed softmax is not supported with weight normalization"); + const int rows_per_block = 4; dim3 grid_dims((n_rows + rows_per_block - 1) / rows_per_block, 1, 1); dim3 
block_dims(WARP_SIZE, rows_per_block, 1); cudaStream_t stream = ctx.stream(); - const int nbytes_shared = n_expert_used * rows_per_block * sizeof(float); - switch (n_expert) { case 1: - topk_moe_cuda<1, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<1, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 2: - topk_moe_cuda<2, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<2, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 4: - topk_moe_cuda<4, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<4, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 8: - topk_moe_cuda<8, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<8, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 16: - topk_moe_cuda<16, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<16, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 32: - topk_moe_cuda<32, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<32, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 64: - topk_moe_cuda<64, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<64, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 128: - topk_moe_cuda<128, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<128, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 256: - topk_moe_cuda<256, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<256, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; case 512: - topk_moe_cuda<512, with_norm> - <<>>(logits, weights, ids, n_rows, n_expert_used); + topk_moe_cuda<512, with_norm, delayed_softmax> + <<>>(logits, weights, ids, n_rows, n_expert_used); break; default: GGML_ASSERT(false && "fatal error"); @@ -190,7 +225,8 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, const ggml_tensor * logits, ggml_tensor * weights, ggml_tensor * ids, - const bool with_norm) { + const bool with_norm, + const bool delayed_softmax) { GGML_ASSERT(logits->type == GGML_TYPE_F32); GGML_ASSERT(weights->type == GGML_TYPE_F32); GGML_ASSERT(ids->type == GGML_TYPE_I32); @@ -198,7 +234,7 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, const int n_experts = logits->ne[0]; const int n_rows = logits->ne[1]; - const float * logits_d = (const float *) logits->src[0]->data; + const float * logits_d = (const float *) logits->data; float * weights_d = (float *) weights->data; int32_t * ids_d = (int32_t *) ids->data; @@ -209,7 +245,11 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, if (with_norm) { launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used); } else { - launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used); + if (delayed_softmax) { + launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used); + } else { + launch_topk_moe_cuda(ctx, logits_d, weights_d, ids_d, n_rows, n_experts, n_expert_used); + } } } @@ -242,7 +282,7 @@ bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const 
ggml_tenso return true; } -std::initializer_list ggml_cuda_topk_moe_ops(bool norm) { +std::initializer_list ggml_cuda_topk_moe_ops(bool norm, bool delayed_softmax) { static std::initializer_list norm_ops = { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE, GGML_OP_SUM_ROWS, GGML_OP_DIV, GGML_OP_RESHAPE }; @@ -250,8 +290,19 @@ std::initializer_list ggml_cuda_topk_moe_ops(bool norm) { static std::initializer_list no_norm_ops = { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, GGML_OP_VIEW, GGML_OP_GET_ROWS }; + static std::initializer_list delayed_softmax_ops = { GGML_OP_ARGSORT, GGML_OP_VIEW, + GGML_OP_GET_ROWS, GGML_OP_RESHAPE, + GGML_OP_SOFT_MAX, GGML_OP_RESHAPE }; + + GGML_ASSERT(!norm || !delayed_softmax); + + if (delayed_softmax) { + return delayed_softmax_ops; + } + if (norm) { return norm_ops; } + return no_norm_ops; } diff --git a/ggml/src/ggml-cuda/topk-moe.cuh b/ggml/src/ggml-cuda/topk-moe.cuh index 6613fb56507ea..cc2fbfe9e6649 100644 --- a/ggml/src/ggml-cuda/topk-moe.cuh +++ b/ggml/src/ggml-cuda/topk-moe.cuh @@ -6,9 +6,10 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx, const ggml_tensor * logits, ggml_tensor * weights, - ggml_tensor * top_k, - const bool with_norm); + ggml_tensor * ids, + const bool with_norm, + const bool delayed_softmax = false); bool ggml_cuda_should_use_topk_moe(const ggml_tensor * softmax, const ggml_tensor * weights); -std::initializer_list ggml_cuda_topk_moe_ops(bool with_norm); +std::initializer_list ggml_cuda_topk_moe_ops(bool with_norm, bool delayed_softmax = false); diff --git a/ggml/src/ggml-hexagon/CMakeLists.txt b/ggml/src/ggml-hexagon/CMakeLists.txt new file mode 100644 index 0000000000000..166825c2c5f71 --- /dev/null +++ b/ggml/src/ggml-hexagon/CMakeLists.txt @@ -0,0 +1,68 @@ +include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_fun.cmake) +include(ExternalProject) + +option(GGML_HEXAGON_HTP_DEBUG "ggml-hexagon: enable HTP debug output" OFF) + +add_library(htp_iface OBJECT + ${CMAKE_CURRENT_BINARY_DIR}/htp_iface_stub.c) + +set_target_properties(htp_iface PROPERTIES POSITION_INDEPENDENT_CODE ON) +target_include_directories(htp_iface PUBLIC + ${HEXAGON_SDK_ROOT}/incs + ${HEXAGON_SDK_ROOT}/incs/stddef + ${HEXAGON_SDK_ROOT}/utils/examples + ${CMAKE_CURRENT_SOURCE_DIR}/htp + ${CMAKE_CURRENT_BINARY_DIR}) + +build_idl(htp/htp_iface.idl htp_iface) + +if (CMAKE_SYSTEM_NAME MATCHES Android) + target_link_options(htp_iface PUBLIC -llog -ldl) +elseif (CMAKE_SYSTEM_NAME MATCHES Windows) + target_precompile_headers(htp_iface PUBLIC ) +else() + target_link_options(htp_iface PUBLIC -ldl) +endif() + +link_custom_library(htp_iface cdsprpc) +link_custom_library(htp_iface rpcmem) + +set(TARGET_NAME ggml-hexagon) +ggml_add_backend_library(${TARGET_NAME} + ggml-hexagon.cpp htp-utils.c htp-utils.h ../../include/ggml-hexagon.h) + +target_link_libraries(${TARGET_NAME} PRIVATE htp_iface) +target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/htp ${CMAKE_CURRENT_BINARY_DIR}) + +# Build HTP bits +set(HTP_CMAKE_ARGS + -DCMAKE_TOOLCHAIN_FILE=${CMAKE_CURRENT_SOURCE_DIR}/htp/cmake-toolchain.cmake + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_LIBDIR=${CMAKE_CURRENT_BINARY_DIR} + -DHEXAGON_SDK_ROOT=$ENV{HEXAGON_SDK_ROOT} + -DHEXAGON_TOOLS_ROOT=$ENV{HEXAGON_TOOLS_ROOT} + -DHEXAGON_HTP_DEBUG=${GGML_HEXAGON_HTP_DEBUG}) + +ExternalProject_Add(htp-v73 + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON + CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v73 -DPREBUILT_LIB_DIR="toolv19_v73") + 
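+# The same HTP (DSP-side) build is repeated below for each supported Hexagon arch;
+# at runtime the session URI in ggml-hexagon.cpp selects the libggml-htp-v7x.so
+# that matches the detected (or overridden) DSP arch.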
+ExternalProject_Add(htp-v75 + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON + CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v75 -DPREBUILT_LIB_DIR="toolv19_v75") + +ExternalProject_Add(htp-v79 + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON + CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v79 -DPREBUILT_LIB_DIR="toolv19_v79") + +ExternalProject_Add(htp-v81 + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/htp BUILD_ALWAYS ON + CMAKE_ARGS ${HTP_CMAKE_ARGS} -DDSP_VERSION=v81 -DPREBUILT_LIB_DIR="toolv19_v81") + +# Install Hexagon skels required at runtime +install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v73.so + ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v75.so + ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v79.so + ${CMAKE_CURRENT_BINARY_DIR}/libggml-htp-v81.so + TYPE LIB) diff --git a/ggml/src/ggml-hexagon/ggml-hexagon.cpp b/ggml/src/ggml-hexagon/ggml-hexagon.cpp new file mode 100644 index 0000000000000..ecfc1c856cb59 --- /dev/null +++ b/ggml/src/ggml-hexagon/ggml-hexagon.cpp @@ -0,0 +1,3757 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef _WIN32 +# include +# ifndef _WINDOWS +# define _WINDOWS +# endif +#else +# include +# include +#endif + +#pragma clang diagnostic ignored "-Wnested-anon-types" +#pragma clang diagnostic ignored "-Wgnu-anonymous-struct" + +#include "htp-utils.h" + +#include +#include +#include + +#define GGML_COMMON_IMPL_CPP +#include "ggml-backend-impl.h" +#include "ggml-common.h" +#include "ggml-hexagon.h" +#include "ggml-impl.h" +#include "ggml-quants.h" +#include "htp-msg.h" +#include "htp_iface.h" + +static size_t opt_ndev = 1; +static size_t opt_nhvx = 0; // use all +static int opt_arch = 0; // autodetect +static int opt_etm = 0; +static int opt_verbose = 0; +static int opt_profile = 0; +static int opt_hostbuf = 1; +static int opt_experimental = 0; + +// Enable all stages by default +static int opt_opmask = HTP_OPMASK_QUEUE | HTP_OPMASK_QUANTIZE | HTP_OPMASK_COMPUTE; +static int opt_opsync = 0; // synchronous ops + +#define HEX_VERBOSE(...) \ + if (opt_verbose) GGML_LOG_DEBUG(__VA_ARGS__) + +#define HEX_PROFILE(...) 
\ + if (opt_profile) GGML_LOG_INFO(__VA_ARGS__) + +static inline uint64_t hex_is_aligned(void * addr, uint32_t align) { + return ((size_t) addr & (align - 1)) == 0; +} + +static inline size_t hex_round_up(size_t n, size_t m) { + return m * ((n + m - 1) / m); +} + +static const char * status_to_str(uint32_t status) { + switch (status) { + case HTP_STATUS_OK: + return "OK"; + case HTP_STATUS_NO_SUPPORT: + return "NO-SUPPORT"; + case HTP_STATUS_INVAL_PARAMS: + return "INVAL-PARAMS"; + case HTP_STATUS_VTCM_TOO_SMALL: + return "VTCM-TOO-SMALL"; + case HTP_STATUS_INTERNAL_ERR: + return "INTERNAL-ERROR"; + default: + return "UNKNOWN"; + } +} + +// ** debug helpers + +static inline int hex_format_tensor_dims(char * str, const struct ggml_tensor * t) { + if (t->ne[2] == 1 && t->ne[3] == 1) { + return sprintf(str, "%d:%d", (int) t->ne[0], (int) t->ne[1]); + } else { + return sprintf(str, "%d:%d:%d:%d", (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]); + } +} + +static inline void hex_format_op_dims(char * str, const struct ggml_tensor * t) { + char * p = str; + + // append src0 and src1 (if any) + if (t->src[0]) { + p += hex_format_tensor_dims(p, t->src[0]); + + for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { + p += sprintf(p, " x "); + p += hex_format_tensor_dims(p, t->src[i]); + } + + p += sprintf(p, " -> "); + } + + // format self dims separately for better visual alignment + char self[64]; + hex_format_tensor_dims(self, t); + + p += sprintf(p, "%s", self); +} + +static inline int hex_format_tensor_strides(char * str, const struct ggml_tensor * t) { + const char * c = ggml_is_contiguous(t) ? "" : "!"; + + if (t->ne[2] == 1 && t->ne[3] == 1) { + return sprintf(str, "%zu:%zu%s", (size_t) t->nb[0], (size_t) t->nb[1], c); + } else { + return sprintf(str, "%zu:%zu:%zu:%zu%s", (size_t) t->nb[0], (size_t) t->nb[1], (size_t) t->nb[2], + (size_t) t->nb[3], c); + } +} + +static inline void hex_format_op_strides(char * str, const struct ggml_tensor * t) { + char * p = str; + + // append src0 and src1 (if any) + if (t->src[0]) { + p += hex_format_tensor_strides(p, t->src[0]); + + for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { + p += sprintf(p, " x "); + p += hex_format_tensor_strides(p, t->src[i]); + } + + p += sprintf(p, " -> "); + } + + // format self dims separately for better visual alignment + char self[64]; + hex_format_tensor_strides(self, t); + + p += sprintf(p, "%s", self); +} + +static inline void hex_format_op_types(char * str, const struct ggml_tensor * t) { + char * p = str; + + // append src0 and src1 (if any) + if (t->src[0]) { + p += sprintf(p, "%s", ggml_type_name(t->src[0]->type)); + + for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { + p += sprintf(p, " x "); + p += sprintf(p, "%s", ggml_type_name(t->src[i]->type)); + } + + p += sprintf(p, " -> "); + } + + p += sprintf(p, "%s", ggml_type_name(t->type)); +} + +static inline const char * hex_tensor_buff_name(const struct ggml_tensor * t) { + if (t->buffer) { + return ggml_backend_buffer_name(t->buffer); + } + return "NONE"; +} + +static inline void hex_format_op_buffs(char * str, const struct ggml_tensor * t) { + char * p = str; + + // append src0 and src1 (if any) + if (t->src[0]) { + p += sprintf(p, "%s", hex_tensor_buff_name(t->src[0])); + + for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { + p += sprintf(p, " x "); + p += sprintf(p, "%s", hex_tensor_buff_name(t->src[i])); + } + + p += sprintf(p, " -> "); + } + + p += sprintf(p, "%s", hex_tensor_buff_name(t)); +} + +static inline void 
hex_format_op_names(char * str, const struct ggml_tensor * t) { + char * p = str; + + // append src0 and src1 (if any) + if (t->src[0]) { + p += sprintf(p, "%s", t->src[0]->name); + + for (int i = 1; i < GGML_MAX_SRC && t->src[i]; i++) { + p += sprintf(p, " x "); + p += sprintf(p, "%s", t->src[i]->name); + } + + p += sprintf(p, " -> "); + } + + p += sprintf(p, "%s", t->name); +} + +// ** backend sessions + +struct ggml_hexagon_session { + ggml_hexagon_session(int dev_id) noexcept(false); + ~ggml_hexagon_session() noexcept(true); + + void allocate(int dev_id) noexcept(false); + void release() noexcept(true); + + ggml_backend_buffer_type buffer_type; + ggml_backend_buffer_type repack_buffer_type; + + std::string name; + remote_handle64 handle; + dspqueue_t queue; + uint32_t session_id; + uint32_t domain_id; + uint64_t queue_id; + int dev_id; + bool valid_session; + bool valid_handle; + bool valid_queue; + bool valid_iface; + std::atomic op_pending; + uint32_t prof_usecs; + uint32_t prof_cycles; + uint32_t prof_pkts; +}; + +// Packet callback +static void htp_packet_callback(dspqueue_t queue, AEEResult error, void * context) { + auto sess = static_cast(context); + + // Repeatedly read packets from the queue until it's empty. We don't + // necessarily get a separate callback for each packet, and new packets + // may arrive while we're processing the previous one. + + while (1) { + struct htp_general_rsp rsp; + uint32_t rsp_size; + uint32_t flags; + + struct dspqueue_buffer bufs[HTP_MAX_PACKET_BUFFERS]; + uint32_t n_bufs; + + // Read packet from queue + int err = dspqueue_read_noblock(queue, &flags, + HTP_MAX_PACKET_BUFFERS, // Maximum number of buffer references + &n_bufs, // Number of buffer references + bufs, // Buffer references + sizeof(rsp), // Max message length + &rsp_size, // Message length + (uint8_t *) &rsp); + + if (err == AEE_EWOULDBLOCK) { + // Consumed all packets available for now + return; + } + + if (err != 0) { + GGML_ABORT("ggml-hex: dspqueue_read_noblock failed: 0x%08x\n", (unsigned) err); + } + + // Basic sanity checks + if (rsp_size != sizeof(rsp)) { + GGML_ABORT("ggml-hex: dspcall : bad response (size)\n"); + } + + if (rsp.status != HTP_STATUS_OK) { + GGML_LOG_ERROR("ggml-hex: dspcall : dsp-rsp: %s\n", status_to_str(rsp.status)); + // TODO: handle errors + } + + // FIXME: update profiling implementation + sess->prof_usecs = rsp.prof_usecs; + sess->prof_cycles = rsp.prof_cycles; + sess->prof_pkts = rsp.prof_pkts; + + sess->op_pending--; // atomic dec + } +} + +// Error callback - simply terminates with an error. Used where we don't +// expect errors. 
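+// Per-op failures are reported through the response packet status and handled in
+// htp_packet_callback() above; this callback is only expected for queue/transport-level failures.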
+[[noreturn]] static void htp_error_callback(dspqueue_t queue, AEEResult error, void * context) { + GGML_ABORT("ggml-hex: dspcall general error 0x%x: for queue %p\n", error, (void *) queue); +} + +// ** backend buffers + +struct ggml_backend_hexagon_buffer_type_context { + ggml_backend_hexagon_buffer_type_context(const std::string & name, ggml_hexagon_session * sess) { + this->sess = sess; + this->name = name; + } + + ggml_hexagon_session * sess; + std::string name; +}; + +struct ggml_backend_hexagon_buffer_context { + bool mmap_to(ggml_hexagon_session * s) { + HEX_VERBOSE("ggml-hex: %s mmaping buffer: base %p domain-id %d session-id %d size %zu fd %d repack %d\n", + s->name.c_str(), (void *) this->base, s->domain_id, s->session_id, this->size, this->fd, + (int) this->repack); + + int err = fastrpc_mmap(s->domain_id, this->fd, (void *) this->base, 0, this->size, FASTRPC_MAP_FD); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: buffer mapping failed : domain_id %d size %zu fd %d error 0x%08x\n", + s->domain_id, this->size, this->fd, (unsigned) err); + return false; + } + + return true; + } + + bool mmap() { + if (this->mapped) { + return true; + } + if (!mmap_to(this->sess)) { + return false; + } + this->mapped = true; + return true; + } + + void munmap() { + if (!this->mapped) { + return; + } + + fastrpc_munmap(this->sess->domain_id, this->fd, this->base, this->size); + this->mapped = false; + } + + ggml_backend_hexagon_buffer_context(ggml_hexagon_session * sess, size_t size, bool repack) { + size += 4 * 1024; // extra page for padding + + this->base = (uint8_t *) rpcmem_alloc2(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS | RPCMEM_HEAP_NOREG, size); + if (!this->base) { + GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer : size %zu\n", sess->name.c_str(), size); + throw std::runtime_error("ggml-hex: rpcmem_alloc failed (see log for details)"); + } + + this->fd = rpcmem_to_fd(this->base); + if (this->fd < 0) { + GGML_LOG_ERROR("ggml-hex: %s failed to get FD for buffer %p\n", sess->name.c_str(), (void *) this->base); + rpcmem_free(this->base); + this->base = NULL; + throw std::runtime_error("ggml-hex: rpcmem_to_fd failed (see log for details)"); + } + + HEX_VERBOSE("ggml-hex: %s allocated buffer: base %p size %zu fd %d repack %d\n", sess->name.c_str(), + (void *) this->base, size, this->fd, (int) repack); + + this->sess = sess; + this->size = size; + this->mapped = false; + this->repack = repack; + } + + ~ggml_backend_hexagon_buffer_context() { + munmap(); + if (this->base) { + rpcmem_free(this->base); + this->base = NULL; + } + } + + ggml_hexagon_session * sess; // primary session + uint8_t * base; + size_t size; + int fd; + bool mapped; // mmap is done + bool repack; // repacked buffer +}; + +static ggml_hexagon_session * ggml_backend_hexagon_buffer_get_sess(ggml_backend_buffer_t buffer) { + return static_cast(buffer->buft->context)->sess; +} + +static void ggml_backend_hexagon_buffer_free_buffer(ggml_backend_buffer_t buffer) { + auto ctx = static_cast(buffer->context); + delete ctx; +} + +static void * ggml_backend_hexagon_buffer_get_base(ggml_backend_buffer_t buffer) { + auto ctx = static_cast(buffer->context); + return ctx->base; +} + +static enum ggml_status ggml_backend_hexagon_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { + auto ctx = static_cast(buffer->context); + auto sess = ctx->sess; + + HEX_VERBOSE("ggml-hex: %s init-tensor %s : base %p data %p nbytes %zu usage %d repack %d\n", sess->name.c_str(), + tensor->name, (void *) ctx->base, tensor->data, 
ggml_nbytes(tensor), (int) buffer->usage, + (int) ctx->repack); + + if (tensor->view_src != NULL && tensor->view_offs == 0) { + ; // nothing to do for the view + } else { + if (!ctx->mapped) { + ctx->mmap(); + } + } + return GGML_STATUS_SUCCESS; +} + +// ======== Q4x4x2 ==================== +struct x2_q4 { + int v[2]; +}; + +static x2_q4 unpack_q4(uint8_t v) { + x2_q4 x = { (int) (v & 0x0f) - 8, (int) (v >> 4) - 8 }; + return x; +} + +static void dump_block_q4_0(const block_q4_0 * b, int i) { + HEX_VERBOSE("ggml-hex: repack q4_0 %d: %d %d %d %d ... %d %d %d %d : %.6f\n", i, unpack_q4(b->qs[0]).v[0], + unpack_q4(b->qs[1]).v[0], unpack_q4(b->qs[2]).v[0], unpack_q4(b->qs[3]).v[0], unpack_q4(b->qs[12]).v[1], + unpack_q4(b->qs[13]).v[1], unpack_q4(b->qs[14]).v[1], unpack_q4(b->qs[15]).v[1], + GGML_FP16_TO_FP32(b->d)); +} + +static void dump_packed_block_q4x4x2(const uint8_t * v, unsigned int i, size_t k) { + static const int qk = QK_Q4_0x4x2; + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded) + + const uint8_t * v_q = v + 0; // quants first + const uint8_t * v_d = v + qrow_size; // then scales + + const uint8_t * q = v_q + i * qblk_size; + const ggml_half * d = (const ggml_half *) (v_d + i * dblk_size); + + HEX_VERBOSE("ggml-hex: repack q4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i, + unpack_q4(q[0]).v[0], unpack_q4(q[1]).v[0], unpack_q4(q[2]).v[0], unpack_q4(q[3]).v[0], + unpack_q4(q[60]).v[0], unpack_q4(q[61]).v[0], unpack_q4(q[62]).v[0], unpack_q4(q[63]).v[0], + unpack_q4(q[124]).v[0], unpack_q4(q[125]).v[0], unpack_q4(q[126]).v[0], unpack_q4(q[127]).v[0], + GGML_FP16_TO_FP32(d[0]), GGML_FP16_TO_FP32(d[1]), GGML_FP16_TO_FP32(d[2]), GGML_FP16_TO_FP32(d[3])); + + HEX_VERBOSE("ggml-hex: repack q4x4x2-%d: %d %d %d %d ... %d %d %d %d ... 
%d %d %d %d : %.6f %.6f %.6f %.6f\n", + i + 1, unpack_q4(q[0]).v[1], unpack_q4(q[1]).v[1], unpack_q4(q[2]).v[1], unpack_q4(q[3]).v[1], + unpack_q4(q[60]).v[1], unpack_q4(q[61]).v[1], unpack_q4(q[62]).v[1], unpack_q4(q[63]).v[1], + unpack_q4(q[124]).v[1], unpack_q4(q[125]).v[1], unpack_q4(q[126]).v[1], unpack_q4(q[127]).v[1], + GGML_FP16_TO_FP32(d[4]), GGML_FP16_TO_FP32(d[5]), GGML_FP16_TO_FP32(d[6]), GGML_FP16_TO_FP32(d[7])); +} + +static void unpack_q4_0_quants(uint8_t * qs, const block_q4_0 * x, unsigned int bi) { + static const int qk = QK4_0; + + for (unsigned int i = 0; i < qk / 2; ++i) { + const int x0 = (x->qs[i] & 0x0F); + const int x1 = (x->qs[i] >> 4); + qs[bi * qk + i + 0] = x0; + qs[bi * qk + i + qk / 2] = x1; + } +} + +static void pack_q4_0_quants(block_q4_0 * x, const uint8_t * qs, unsigned int bi) { + static const int qk = QK4_0; + + for (unsigned int i = 0; i < qk / 2; ++i) { + const uint8_t x0 = qs[bi * qk + i + 0]; + const uint8_t x1 = qs[bi * qk + i + qk / 2]; + x->qs[i] = x0 | (x1 << 4); + } +} + +static void repack_row_q4x4x2(uint8_t * y, const block_q4_0 * x, int64_t k) { + static const int qk = QK_Q4_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded to blocks) + + uint8_t * y_q = y + 0; // quants first + uint8_t * y_d = y + qrow_size; // then scales + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_q4_0(&x[i * 8 + 0], 0); + dump_block_q4_0(&x[i * 8 + 1], 1); + dump_block_q4_0(&x[i * 8 + 2], 2); + dump_block_q4_0(&x[i * 8 + 3], 3); + dump_block_q4_0(&x[i * 8 + 4], 4); + dump_block_q4_0(&x[i * 8 + 5], 5); + dump_block_q4_0(&x[i * 8 + 6], 6); + dump_block_q4_0(&x[i * 8 + 7], 7); + } + } + + // Repack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_Q4_0x4x2]; // unpacked quants + unpack_q4_0_quants(qs, &x[i * 8 + 0], 0); + unpack_q4_0_quants(qs, &x[i * 8 + 1], 1); + unpack_q4_0_quants(qs, &x[i * 8 + 2], 2); + unpack_q4_0_quants(qs, &x[i * 8 + 3], 3); + unpack_q4_0_quants(qs, &x[i * 8 + 4], 4); + unpack_q4_0_quants(qs, &x[i * 8 + 5], 5); + unpack_q4_0_quants(qs, &x[i * 8 + 6], 6); + unpack_q4_0_quants(qs, &x[i * 8 + 7], 7); + + uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk / 2; j++) { + q[j] = (qs[j + 128] << 4) | qs[j]; + } + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) + // the last block is truncated and overriden by the scales. 
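+    // Packed row layout: the first k/2 bytes hold the quants (per 256-element super-block,
+    // byte j carries element j in its low nibble and element j+128 in its high nibble),
+    // followed by 8 fp16 scales per super-block starting at row offset k/2.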
+ for (int i = 0; i < nb; i++) { + // Repack the scales + ggml_half * d = (ggml_half *) (y_d + i * dblk_size); + d[0] = x[i * 8 + 0].d; + d[1] = x[i * 8 + 1].d; + d[2] = x[i * 8 + 2].d; + d[3] = x[i * 8 + 3].d; + d[4] = x[i * 8 + 4].d; + d[5] = x[i * 8 + 5].d; + d[6] = x[i * 8 + 6].d; + d[7] = x[i * 8 + 7].d; + } + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_q4x4x2(y, i, k); + } + } +} + +static void unpack_row_q4x4x2(block_q4_0 * x, const uint8_t * y, int64_t k) { + static const int qk = QK_Q4_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded to blocks) + + const uint8_t * y_q = y + 0; // quants first + const uint8_t * y_d = y + qrow_size; // then scales + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_q4x4x2(y, i, k); + } + } + + // Unpack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_Q4_0x4x2]; // unpacked quants + + const uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk / 2; j++) { + qs[j] = q[j] & 0xf; + qs[j + 128] = q[j] >> 4; + } + + pack_q4_0_quants(&x[i * 8 + 0], qs, 0); + pack_q4_0_quants(&x[i * 8 + 1], qs, 1); + pack_q4_0_quants(&x[i * 8 + 2], qs, 2); + pack_q4_0_quants(&x[i * 8 + 3], qs, 3); + pack_q4_0_quants(&x[i * 8 + 4], qs, 4); + pack_q4_0_quants(&x[i * 8 + 5], qs, 5); + pack_q4_0_quants(&x[i * 8 + 6], qs, 6); + pack_q4_0_quants(&x[i * 8 + 7], qs, 7); + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) + // the last block is truncated and overriden by the scales. + for (int i = 0; i < nb; i++) { + // Unpack the scales + const ggml_half * d = (const ggml_half *) (y_d + i * dblk_size); + x[i * 8 + 0].d = d[0]; + x[i * 8 + 1].d = d[1]; + x[i * 8 + 2].d = d[2]; + x[i * 8 + 3].d = d[3]; + x[i * 8 + 4].d = d[4]; + x[i * 8 + 5].d = d[5]; + x[i * 8 + 6].d = d[6]; + x[i * 8 + 7].d = d[7]; + } + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_q4_0(&x[i * 8 + 0], 0); + dump_block_q4_0(&x[i * 8 + 1], 1); + dump_block_q4_0(&x[i * 8 + 2], 2); + dump_block_q4_0(&x[i * 8 + 3], 3); + dump_block_q4_0(&x[i * 8 + 4], 4); + dump_block_q4_0(&x[i * 8 + 5], 5); + dump_block_q4_0(&x[i * 8 + 6], 6); + dump_block_q4_0(&x[i * 8 + 7], 7); + } + } +} + +static void init_row_q4x4x2(block_q4_0 * x, int64_t k) { + static const int qk = QK_Q4_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + // Init the quants such that they unpack into zeros + uint8_t qs[QK_Q4_0x4x2]; // unpacked quants + memset(qs, 8, sizeof(qs)); + + for (int i = 0; i < nb; i++) { + pack_q4_0_quants(&x[i * 8 + 0], qs, 0); + pack_q4_0_quants(&x[i * 8 + 1], qs, 1); + pack_q4_0_quants(&x[i * 8 + 2], qs, 2); + pack_q4_0_quants(&x[i * 8 + 3], qs, 3); + pack_q4_0_quants(&x[i * 8 + 4], qs, 4); + pack_q4_0_quants(&x[i * 8 + 5], qs, 5); + pack_q4_0_quants(&x[i * 8 + 6], qs, 6); + pack_q4_0_quants(&x[i * 8 + 7], qs, 7); + } + + // Init the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) + // the last block is truncated and overriden by the scales. 
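+    // Nibble value 8 decodes to 0 in q4_0 (values are stored offset by 8), so the
+    // memset(qs, 8, ...) above together with the zero scales below makes the padded
+    // tail dequantize to exactly 0.0f.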
+ for (int i = 0; i < nb; i++) { + // Unpack the scales + x[i * 8 + 0].d = 0; + x[i * 8 + 1].d = 0; + x[i * 8 + 2].d = 0; + x[i * 8 + 3].d = 0; + x[i * 8 + 4].d = 0; + x[i * 8 + 5].d = 0; + x[i * 8 + 6].d = 0; + x[i * 8 + 7].d = 0; + } +} + +// repack q4_0 data into q4x4x2 tensor +static void repack_q4_0_q4x4x2(ggml_tensor * t, const void * data, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_0x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); + + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-q4_0-q4x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, + t->ne[0], nrows, row_size); + + init_row_q4x4x2((block_q4_0 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) data + (i * row_size); + uint8_t * dst = (uint8_t *) t->data + (i * row_size); + + memcpy(buf_pd, src, row_size); + repack_row_q4x4x2((uint8_t *) buf_rp, (const block_q4_0 *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +// repack q4x4x2 tensor into q4_0 data +static void repack_q4x4x2_q4_0(void * data, const ggml_tensor * t, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_0x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); + + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-q4x4x2-q4_0 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, + t->ne[0], nrows, row_size); + + memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) t->data + (i * row_size); + uint8_t * dst = (uint8_t *) data + (i * row_size); + + memcpy(buf_pd, src, row_size); + unpack_row_q4x4x2((block_q4_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +// ======== Q8x4x2 ==================== +static void dump_block_q8_0(const block_q8_0 * b, int i) { + HEX_VERBOSE("ggml-hex: repack q8_0 %d: %d %d %d %d ... %d %d %d %d : %.6f\n", i, b->qs[0], b->qs[1], b->qs[2], + b->qs[3], b->qs[28], b->qs[29], b->qs[30], b->qs[31], GGML_FP16_TO_FP32(b->d)); +} + +static void dump_packed_block_q8x4x2(const uint8_t * v, unsigned int i, size_t k) { + static const int qk = QK_Q8_0x4x2; + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk; // int8 + const int qrow_size = k; // int8 (not padded) + + const uint8_t * v_q = v + 0; // quants first + const uint8_t * v_d = v + qrow_size; // then scales + + const uint8_t * q = v_q + i * qblk_size; + const ggml_half * d = (const ggml_half *) (v_d + i * dblk_size); + + HEX_VERBOSE("ggml-hex: repack q8x4x2-%d: %d %d %d %d ... %d %d %d %d ... 
%d %d %d %d : %.6f %.6f %.6f %.6f\n", i, + q[0], q[1], q[2], q[3], q[60], q[61], q[62], q[63], q[124], q[125], q[126], q[127], + GGML_FP16_TO_FP32(d[0]), GGML_FP16_TO_FP32(d[1]), GGML_FP16_TO_FP32(d[2]), GGML_FP16_TO_FP32(d[3])); + + HEX_VERBOSE("ggml-hex: repack q8x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", + i + 1, q[128], q[129], q[130], q[131], q[192], q[193], q[194], q[195], q[252], q[253], q[254], q[255], + GGML_FP16_TO_FP32(d[4]), GGML_FP16_TO_FP32(d[5]), GGML_FP16_TO_FP32(d[6]), GGML_FP16_TO_FP32(d[7])); +} + +static void unpack_q8_0_quants(uint8_t * qs, const block_q8_0 * x, unsigned int bi) { + static const int qk = QK8_0; + + for (unsigned int i = 0; i < qk; ++i) { + qs[bi * qk + i] = x->qs[i]; + } +} + +static void pack_q8_0_quants(block_q8_0 * x, const uint8_t * qs, unsigned int bi) { + static const int qk = QK8_0; + + for (unsigned int i = 0; i < qk; ++i) { + x->qs[i] = qs[bi * qk + i]; + } +} + +static void repack_row_q8x4x2(uint8_t * y, const block_q8_0 * x, int64_t k) { + static const int qk = QK_Q8_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk; // int8 + const int qrow_size = k; // int8 (not padded to blocks) + + uint8_t * y_q = y + 0; // quants first + uint8_t * y_d = y + qrow_size; // then scales + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_q8_0(&x[i * 8 + 0], 0); + dump_block_q8_0(&x[i * 8 + 1], 1); + dump_block_q8_0(&x[i * 8 + 2], 2); + dump_block_q8_0(&x[i * 8 + 3], 3); + dump_block_q8_0(&x[i * 8 + 4], 4); + dump_block_q8_0(&x[i * 8 + 5], 5); + dump_block_q8_0(&x[i * 8 + 6], 6); + dump_block_q8_0(&x[i * 8 + 7], 7); + } + } + + // Repack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_Q8_0x4x2]; // unpacked quants + + unpack_q8_0_quants(qs, &x[i * 8 + 0], 0); + unpack_q8_0_quants(qs, &x[i * 8 + 1], 1); + unpack_q8_0_quants(qs, &x[i * 8 + 2], 2); + unpack_q8_0_quants(qs, &x[i * 8 + 3], 3); + unpack_q8_0_quants(qs, &x[i * 8 + 4], 4); + unpack_q8_0_quants(qs, &x[i * 8 + 5], 5); + unpack_q8_0_quants(qs, &x[i * 8 + 6], 6); + unpack_q8_0_quants(qs, &x[i * 8 + 7], 7); + + uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk; j++) { + q[j] = qs[j]; + } + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) + // the last block is truncated and overriden by the scales. 
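+    // For q8x4x2 the 8 q8_0 blocks of each 256-element super-block are stored back to back
+    // (32 int8 quants each); only the fp16 scales are gathered here at the end of the row.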
+ for (int i = 0; i < nb; i++) { + // Repack the scales + ggml_half * d = (ggml_half *) (y_d + i * dblk_size); + d[0] = x[i * 8 + 0].d; + d[1] = x[i * 8 + 1].d; + d[2] = x[i * 8 + 2].d; + d[3] = x[i * 8 + 3].d; + d[4] = x[i * 8 + 4].d; + d[5] = x[i * 8 + 5].d; + d[6] = x[i * 8 + 6].d; + d[7] = x[i * 8 + 7].d; + } + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_q8x4x2(y, i, k); + } + } +} + +static void unpack_row_q8x4x2(block_q8_0 * x, const uint8_t * y, int64_t k) { + static const int qk = QK_Q8_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int dblk_size = 8 * 2; // 8x __fp16 + const int qblk_size = qk; // int8 + const int qrow_size = k; // int8 (not padded to blocks) + + const uint8_t * y_q = y + 0; // quants first + const uint8_t * y_d = y + qrow_size; // then scales + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_q8x4x2(y, i, k); + } + } + + // Unpack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_Q4_0x4x2]; // unpacked quants + + const uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk; j++) { + qs[j] = q[j]; + } + + pack_q8_0_quants(&x[i * 8 + 0], qs, 0); + pack_q8_0_quants(&x[i * 8 + 1], qs, 1); + pack_q8_0_quants(&x[i * 8 + 2], qs, 2); + pack_q8_0_quants(&x[i * 8 + 3], qs, 3); + pack_q8_0_quants(&x[i * 8 + 4], qs, 4); + pack_q8_0_quants(&x[i * 8 + 5], qs, 5); + pack_q8_0_quants(&x[i * 8 + 6], qs, 6); + pack_q8_0_quants(&x[i * 8 + 7], qs, 7); + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q4_0x4x2) + // the last block is truncated and overriden by the scales. + for (int i = 0; i < nb; i++) { + // Unpack the scales + const ggml_half * d = (const ggml_half *) (y_d + i * dblk_size); + x[i * 8 + 0].d = d[0]; + x[i * 8 + 1].d = d[1]; + x[i * 8 + 2].d = d[2]; + x[i * 8 + 3].d = d[3]; + x[i * 8 + 4].d = d[4]; + x[i * 8 + 5].d = d[5]; + x[i * 8 + 6].d = d[6]; + x[i * 8 + 7].d = d[7]; + } + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_q8_0(&x[i * 8 + 0], 0); + dump_block_q8_0(&x[i * 8 + 1], 1); + dump_block_q8_0(&x[i * 8 + 2], 2); + dump_block_q8_0(&x[i * 8 + 3], 3); + dump_block_q8_0(&x[i * 8 + 4], 4); + dump_block_q8_0(&x[i * 8 + 5], 5); + dump_block_q8_0(&x[i * 8 + 6], 6); + dump_block_q8_0(&x[i * 8 + 7], 7); + } + } +} + +static void init_row_q8x4x2(block_q8_0 * x, int64_t k) { + static const int qk = QK_Q8_0x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + // Init the quants such that they unpack into zeros + uint8_t qs[QK_Q8_0x4x2]; // unpacked quants + memset(qs, 0, sizeof(qs)); + + for (int i = 0; i < nb; i++) { + pack_q8_0_quants(&x[i * 8 + 0], qs, 0); + pack_q8_0_quants(&x[i * 8 + 1], qs, 1); + pack_q8_0_quants(&x[i * 8 + 2], qs, 2); + pack_q8_0_quants(&x[i * 8 + 3], qs, 3); + pack_q8_0_quants(&x[i * 8 + 4], qs, 4); + pack_q8_0_quants(&x[i * 8 + 5], qs, 5); + pack_q8_0_quants(&x[i * 8 + 6], qs, 6); + pack_q8_0_quants(&x[i * 8 + 7], qs, 7); + } + + // Init the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_Q8_0x4x2) + // the last block is truncated and overriden by the scales. 
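+    // q8_0 quants are plain signed bytes, so the zeroed quants above plus the zero
+    // scales below already decode the padded tail to 0.0f.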
+ for (int i = 0; i < nb; i++) { + // Unpack the scales + x[i * 8 + 0].d = 0; + x[i * 8 + 1].d = 0; + x[i * 8 + 2].d = 0; + x[i * 8 + 3].d = 0; + x[i * 8 + 4].d = 0; + x[i * 8 + 5].d = 0; + x[i * 8 + 6].d = 0; + x[i * 8 + 7].d = 0; + } +} + +// repack q8_0 data into q8x4x2 tensor +static void repack_q8_0_q8x4x2(ggml_tensor * t, const void * data, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q8_0x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); + + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-q8_0-q8x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, + t->ne[0], nrows, row_size); + + init_row_q8x4x2((block_q8_0 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) data + (i * row_size); + uint8_t * dst = (uint8_t *) t->data + (i * row_size); + + memcpy(buf_pd, src, row_size); + repack_row_q8x4x2((uint8_t *) buf_rp, (const block_q8_0 *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +// repack q8x4x2 tensor into q8_0 data +static void repack_q8x4x2_q8_0(void * data, const ggml_tensor * t, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q8_0x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); + + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-q8x4x2-q8_0 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, size, + t->ne[0], nrows, row_size); + + memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) t->data + (i * row_size); + uint8_t * dst = (uint8_t *) data + (i * row_size); + + memcpy(buf_pd, src, row_size); + unpack_row_q8x4x2((block_q8_0 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +// ======== MXFP4x4x2 ==================== +struct x2_mxfp4 { + int v[2]; +}; + +static x2_mxfp4 unpack_mxfp4(uint8_t v) { + x2_mxfp4 x; + x.v[0] = kvalues_mxfp4[(v & 0x0f)]; + x.v[1] = kvalues_mxfp4[(v >> 4)]; + return x; +} + +static void dump_block_mxfp4(const block_mxfp4 * b, int i) { + HEX_VERBOSE("ggml-hex: repack mxfp4 %d: %d %d %d %d ... 
%d %d %d %d : %.6f\n", i, unpack_mxfp4(b->qs[0]).v[0], + unpack_mxfp4(b->qs[1]).v[0], unpack_mxfp4(b->qs[2]).v[0], unpack_mxfp4(b->qs[3]).v[0], + unpack_mxfp4(b->qs[12]).v[1], unpack_mxfp4(b->qs[13]).v[1], unpack_mxfp4(b->qs[14]).v[1], + unpack_mxfp4(b->qs[15]).v[1], GGML_E8M0_TO_FP32_HALF(b->e)); +} + +static void dump_packed_block_mxfp4x4x2(const uint8_t * v, unsigned int i, size_t k) { + static const int qk = QK_MXFP4x4x2; + const int eblk_size = 8 * 1; // 8x E8M0 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded) + + const uint8_t * v_q = v + 0; // quants first + const uint8_t * v_e = v + qrow_size; // then scales + + const uint8_t * q = v_q + i * qblk_size; + const uint8_t * e = (const uint8_t *) (v_e + i * eblk_size); + + HEX_VERBOSE("ggml-hex: repack mxfp4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", i, + unpack_mxfp4(q[0]).v[0], unpack_mxfp4(q[1]).v[0], unpack_mxfp4(q[2]).v[0], unpack_mxfp4(q[3]).v[0], + unpack_mxfp4(q[60]).v[0], unpack_mxfp4(q[61]).v[0], unpack_mxfp4(q[62]).v[0], unpack_mxfp4(q[63]).v[0], + unpack_mxfp4(q[124]).v[0], unpack_mxfp4(q[125]).v[0], unpack_mxfp4(q[126]).v[0], + unpack_mxfp4(q[127]).v[0], GGML_E8M0_TO_FP32_HALF(e[0]), GGML_E8M0_TO_FP32_HALF(e[1]), + GGML_E8M0_TO_FP32_HALF(e[2]), GGML_E8M0_TO_FP32_HALF(e[3])); + + HEX_VERBOSE("ggml-hex: repack mxfp4x4x2-%d: %d %d %d %d ... %d %d %d %d ... %d %d %d %d : %.6f %.6f %.6f %.6f\n", + i + 1, unpack_mxfp4(q[0]).v[1], unpack_mxfp4(q[1]).v[1], unpack_mxfp4(q[2]).v[1], + unpack_mxfp4(q[3]).v[1], unpack_mxfp4(q[60]).v[1], unpack_mxfp4(q[61]).v[1], unpack_mxfp4(q[62]).v[1], + unpack_mxfp4(q[63]).v[1], unpack_mxfp4(q[124]).v[1], unpack_mxfp4(q[125]).v[1], + unpack_mxfp4(q[126]).v[1], unpack_mxfp4(q[127]).v[1], GGML_E8M0_TO_FP32_HALF(e[4]), + GGML_E8M0_TO_FP32_HALF(e[5]), GGML_E8M0_TO_FP32_HALF(e[6]), GGML_E8M0_TO_FP32_HALF(e[7])); +} + +static void unpack_mxfp4_quants(uint8_t * qs, const block_mxfp4 * x, unsigned int bi) { + static const int qk = QK_MXFP4; + + for (unsigned int i = 0; i < qk / 2; ++i) { + const uint8_t x0 = (x->qs[i] & 0x0F); + const uint8_t x1 = (x->qs[i] >> 4); + qs[bi * qk + i + 0] = x0; + qs[bi * qk + i + qk / 2] = x1; + } +} + +static void pack_mxfp4_quants(block_mxfp4 * x, const uint8_t * qs, unsigned int bi) { + static const int qk = QK4_0; + + for (unsigned int i = 0; i < qk / 2; ++i) { + const uint8_t x0 = qs[bi * qk + i + 0]; + const uint8_t x1 = qs[bi * qk + i + qk / 2]; + x->qs[i] = x0 | (x1 << 4); + } +} + +static void repack_row_mxfp4x4x2(uint8_t * y, const block_mxfp4 * x, int64_t k) { + static const int qk = QK_MXFP4x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int eblk_size = 8 * 1; // 8x E8M0 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded to blocks) + + uint8_t * y_q = y + 0; // quants first + uint8_t * y_e = y + qrow_size; // then scales + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_mxfp4(&x[i * 8 + 0], 0); + dump_block_mxfp4(&x[i * 8 + 1], 1); + dump_block_mxfp4(&x[i * 8 + 2], 2); + dump_block_mxfp4(&x[i * 8 + 3], 3); + dump_block_mxfp4(&x[i * 8 + 4], 4); + dump_block_mxfp4(&x[i * 8 + 5], 5); + dump_block_mxfp4(&x[i * 8 + 6], 6); + dump_block_mxfp4(&x[i * 8 + 7], 7); + } + } + + // Repack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_MXFP4x4x2]; // unpacked quants + + unpack_mxfp4_quants(qs, &x[i * 8 + 0], 0); + unpack_mxfp4_quants(qs, &x[i * 8 + 1], 1); + unpack_mxfp4_quants(qs, &x[i * 8 + 
2], 2); + unpack_mxfp4_quants(qs, &x[i * 8 + 3], 3); + unpack_mxfp4_quants(qs, &x[i * 8 + 4], 4); + unpack_mxfp4_quants(qs, &x[i * 8 + 5], 5); + unpack_mxfp4_quants(qs, &x[i * 8 + 6], 6); + unpack_mxfp4_quants(qs, &x[i * 8 + 7], 7); + + uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk / 2; j++) { + q[j] = (qs[j + 128] << 4) | qs[j]; + } + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4x4x2) + // the last block is truncated and overriden by the scales. + for (int i = 0; i < nb; i++) { + // Repack the scales + uint8_t * e = (uint8_t *) (y_e + i * eblk_size); + e[0] = x[i * 8 + 0].e; + e[1] = x[i * 8 + 1].e; + e[2] = x[i * 8 + 2].e; + e[3] = x[i * 8 + 3].e; + e[4] = x[i * 8 + 4].e; + e[5] = x[i * 8 + 5].e; + e[6] = x[i * 8 + 6].e; + e[7] = x[i * 8 + 7].e; + } + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_mxfp4x4x2(y, i, k); + } + } +} + +static void unpack_row_mxfp4x4x2(block_mxfp4 * x, const uint8_t * y, int64_t k) { + static const int qk = QK_MXFP4x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + const int eblk_size = 8 * 1; // 8x E8M0 + const int qblk_size = qk / 2; // int4 + const int qrow_size = k / 2; // int4 (not padded to blocks) + + const uint8_t * y_q = y + 0; // quants first + const uint8_t * y_e = y + qrow_size; // then scales + + if (opt_verbose > 1) { + for (int i = 0; i < nb; i++) { + dump_packed_block_mxfp4x4x2(y, i, k); + } + } + + // Unpack the quants + for (int i = 0; i < nb; i++) { + uint8_t qs[QK_MXFP4x4x2]; // unpacked quants + + const uint8_t * q = y_q + (i * qblk_size); + for (int j = 0; j < qk / 2; j++) { + qs[j] = q[j] & 0xf; + qs[j + 128] = q[j] >> 4; + } + + pack_mxfp4_quants(&x[i * 8 + 0], qs, 0); + pack_mxfp4_quants(&x[i * 8 + 1], qs, 1); + pack_mxfp4_quants(&x[i * 8 + 2], qs, 2); + pack_mxfp4_quants(&x[i * 8 + 3], qs, 3); + pack_mxfp4_quants(&x[i * 8 + 4], qs, 4); + pack_mxfp4_quants(&x[i * 8 + 5], qs, 5); + pack_mxfp4_quants(&x[i * 8 + 6], qs, 6); + pack_mxfp4_quants(&x[i * 8 + 7], qs, 7); + } + + // Repack the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4_0x4x2) + // the last block is truncated and overriden by the scales. 
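+    // mxfp4 carries one E8M0 exponent byte per block, so each super-block stores
+    // 8 scale bytes here (vs. 16 bytes of fp16 scales in the q4/q8 variants).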
+ for (int i = 0; i < nb; i++) { + // Unpack the scales + const uint8_t * e = (const uint8_t *) (y_e + i * eblk_size); + x[i * 8 + 0].e = e[0]; + x[i * 8 + 1].e = e[1]; + x[i * 8 + 2].e = e[2]; + x[i * 8 + 3].e = e[3]; + x[i * 8 + 4].e = e[4]; + x[i * 8 + 5].e = e[5]; + x[i * 8 + 6].e = e[6]; + x[i * 8 + 7].e = e[7]; + } + + if (opt_verbose > 2) { + for (int i = 0; i < nb; i++) { + dump_block_mxfp4(&x[i * 8 + 0], 0); + dump_block_mxfp4(&x[i * 8 + 1], 1); + dump_block_mxfp4(&x[i * 8 + 2], 2); + dump_block_mxfp4(&x[i * 8 + 3], 3); + dump_block_mxfp4(&x[i * 8 + 4], 4); + dump_block_mxfp4(&x[i * 8 + 5], 5); + dump_block_mxfp4(&x[i * 8 + 6], 6); + dump_block_mxfp4(&x[i * 8 + 7], 7); + } + } +} + +static void init_row_mxfp4x4x2(block_mxfp4 * x, int64_t k) { + static const int qk = QK_MXFP4x4x2; + const int nb = (k + qk - 1) / qk; // number of blocks (padded) + + // Init the quants such that they unpack into zeros + uint8_t qs[QK_MXFP4x4x2]; // unpacked quants + memset(qs, 0, sizeof(qs)); + + for (int i = 0; i < nb; i++) { + pack_mxfp4_quants(&x[i * 8 + 0], qs, 0); + pack_mxfp4_quants(&x[i * 8 + 1], qs, 1); + pack_mxfp4_quants(&x[i * 8 + 2], qs, 2); + pack_mxfp4_quants(&x[i * 8 + 3], qs, 3); + pack_mxfp4_quants(&x[i * 8 + 4], qs, 4); + pack_mxfp4_quants(&x[i * 8 + 5], qs, 5); + pack_mxfp4_quants(&x[i * 8 + 6], qs, 6); + pack_mxfp4_quants(&x[i * 8 + 7], qs, 7); + } + + // Init the scales + // Note: Do not combine with the loop above. For tensor sizes not multiple of 256 (QK_MXFP4x4x2) + // the last block is truncated and overriden by the scales. + for (int i = 0; i < nb; i++) { + // Unpack the scales + x[i * 8 + 0].e = 0; + x[i * 8 + 1].e = 0; + x[i * 8 + 2].e = 0; + x[i * 8 + 3].e = 0; + x[i * 8 + 4].e = 0; + x[i * 8 + 5].e = 0; + x[i * 8 + 6].e = 0; + x[i * 8 + 7].e = 0; + } +} + +// repack mxfp4 data into mxfp4x4x2 tensor +static void repack_mxfp4_mxfp4x4x2(ggml_tensor * t, const void * data, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_MXFP4x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); + + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-mxfp4-mxfp4x4x2 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, + size, t->ne[0], nrows, row_size); + + init_row_mxfp4x4x2((block_mxfp4 *) buf_pd, t->ne[0]); // init padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) data + (i * row_size); + uint8_t * dst = (uint8_t *) t->data + (i * row_size); + + memcpy(buf_pd, src, row_size); + repack_row_mxfp4x4x2((uint8_t *) buf_rp, (const block_mxfp4 *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +// repack mxfp4x4x2 tensor into mxfp4 data +static void repack_mxfp4x4x2_mxfp4(void * data, const ggml_tensor * t, size_t size) { + int64_t nrows = ggml_nrows(t); + + size_t row_size = ggml_row_size(t->type, t->ne[0]); + size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_MXFP4x4x2)); // extra elements for the pad + size_t row_size_rp = row_size * 2; // extra space for tmp pad (if any) + + void * buf_pd = ggml_aligned_malloc(row_size_pd); + GGML_ASSERT(buf_pd != NULL); 
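+    // buf_pd receives a zero-padded copy of one packed row; buf_rp holds the unpacked
+    // block_mxfp4 row before it is copied out to 'data'.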
+ + void * buf_rp = ggml_aligned_malloc(row_size_rp); + GGML_ASSERT(buf_rp != NULL); + + HEX_VERBOSE("ggml-hex: repack-mxfp4x4x2-mxfp4 %s : data %p size %zu dims %ldx%ld row-size %zu\n", t->name, data, + size, t->ne[0], nrows, row_size); + + memset(buf_pd, 0, row_size_pd); // clear-out padded buffer to make sure the tail is all zeros + + for (int64_t i = 0; i < nrows; i++) { + const uint8_t * src = (const uint8_t *) t->data + (i * row_size); + uint8_t * dst = (uint8_t *) data + (i * row_size); + + memcpy(buf_pd, src, row_size); + unpack_row_mxfp4x4x2((block_mxfp4 *) buf_rp, (const uint8_t *) buf_pd, t->ne[0]); + memcpy(dst, buf_rp, row_size); + } + + ggml_aligned_free(buf_pd, row_size_pd); + ggml_aligned_free(buf_rp, row_size_rp); +} + +static void ggml_backend_hexagon_buffer_set_tensor(ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; + auto sess = ctx->sess; + + HEX_VERBOSE("ggml-hex: %s set-tensor %s : data %p offset %zu size %zu\n", sess->name.c_str(), tensor->name, data, + offset, size); + + switch (tensor->type) { + case GGML_TYPE_Q4_0: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_q4_0_q4x4x2(tensor, data, size); + break; + + case GGML_TYPE_Q8_0: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_q8_0_q8x4x2(tensor, data, size); + break; + + case GGML_TYPE_MXFP4: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_mxfp4_mxfp4x4x2(tensor, data, size); + break; + + default: + memcpy((char *) tensor->data + offset, data, size); + break; + } +} + +static void ggml_backend_hexagon_buffer_get_tensor(ggml_backend_buffer_t buffer, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { + auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; + auto sess = ctx->sess; + + HEX_VERBOSE("ggml-hex: %s get-tensor %s : data %p offset %zu size %zu\n", sess->name.c_str(), tensor->name, data, + offset, size); + + switch (tensor->type) { + case GGML_TYPE_Q4_0: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_q4x4x2_q4_0(data, tensor, size); + break; + + case GGML_TYPE_Q8_0: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_q8x4x2_q8_0(data, tensor, size); + break; + + case GGML_TYPE_MXFP4: + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + repack_mxfp4x4x2_mxfp4(data, tensor, size); + break; + + default: + memcpy(data, (const char *) tensor->data + offset, size); + break; + } +} + +static bool ggml_backend_hexagon_buffer_cpy_tensor(ggml_backend_buffer_t buffer, + const struct ggml_tensor * src, + struct ggml_tensor * dst) { + GGML_UNUSED(buffer); + GGML_UNUSED(src); + GGML_UNUSED(dst); + // we might optimize this later, for now take the slow path (ie get/set_tensor) + return false; +} + +static void ggml_backend_hexagon_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + auto ctx = (ggml_backend_hexagon_buffer_context *) buffer->context; + auto sess = ctx->sess; + HEX_VERBOSE("ggml-hex: %s clear-buff base %p size %zu\n", sess->name.c_str(), (void *) ctx->base, ctx->size); + memset(ctx->base, value, ctx->size); +} + +static ggml_backend_buffer_i ggml_backend_hexagon_buffer_interface = { + /* .free_buffer = */ ggml_backend_hexagon_buffer_free_buffer, + /* .get_base = */ ggml_backend_hexagon_buffer_get_base, + /* .init_tensor = */ 
ggml_backend_hexagon_buffer_init_tensor, + /* .memset_tensor = */ NULL, + /* .set_tensor = */ ggml_backend_hexagon_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_hexagon_buffer_get_tensor, + /* .cpy_tensor = */ ggml_backend_hexagon_buffer_cpy_tensor, + /* .clear = */ ggml_backend_hexagon_buffer_clear, + /* .reset = */ NULL, +}; + +// ** backend buffer type + +static const char * ggml_backend_hexagon_buffer_type_name(ggml_backend_buffer_type_t buffer_type) { + return static_cast(buffer_type->context)->name.c_str(); +} + +static ggml_backend_buffer_t ggml_backend_hexagon_buffer_type_alloc_buffer( + ggml_backend_buffer_type_t buffer_type, size_t size) { + auto sess = static_cast(buffer_type->context)->sess; + try { + ggml_backend_hexagon_buffer_context * ctx = new ggml_backend_hexagon_buffer_context(sess, size, false /*repack*/); + return ggml_backend_buffer_init(buffer_type, ggml_backend_hexagon_buffer_interface, ctx, size); + } catch (std::exception const &exc) { + GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer context: %s\n", sess->name.c_str(), exc.what()); + return nullptr; + } +} + +static ggml_backend_buffer_t ggml_backend_hexagon_repack_buffer_type_alloc_buffer( + ggml_backend_buffer_type_t buffer_type, size_t size) { + auto sess = static_cast(buffer_type->context)->sess; + try { + ggml_backend_hexagon_buffer_context * ctx = new ggml_backend_hexagon_buffer_context(sess, size, true /*repack*/); + return ggml_backend_buffer_init(buffer_type, ggml_backend_hexagon_buffer_interface, ctx, size); + } catch (std::exception const &exc) { + GGML_LOG_ERROR("ggml-hex: %s failed to allocate buffer context: %s\n", sess->name.c_str(), exc.what()); + return nullptr; + } +} + +static size_t ggml_backend_hexagon_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) { + return 128; // HVX alignment + GGML_UNUSED(buffer_type); +} + +static size_t ggml_backend_hexagon_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * t) { + return ggml_nbytes(t); +} + +static size_t ggml_backend_hexagon_buffer_type_get_max_size(ggml_backend_buffer_type_t buffer_type) { + return 1 * 1024 * 1024 * 1024; // 1GB per buffer + GGML_UNUSED(buffer_type); +} + +static bool ggml_backend_hexagon_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + return opt_hostbuf; + GGML_UNUSED(buft); +} + +static bool ggml_backend_hexagon_repack_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + return false; + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_type_i ggml_backend_hexagon_buffer_type_interface = { + /* .get_name = */ ggml_backend_hexagon_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_hexagon_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_hexagon_buffer_type_get_alignment, + /* .get_max_size = */ ggml_backend_hexagon_buffer_type_get_max_size, + /* .get_alloc_size = */ ggml_backend_hexagon_buffer_type_get_alloc_size, + /* .is_host = */ ggml_backend_hexagon_buffer_type_is_host, +}; + +static ggml_backend_buffer_type_i ggml_backend_hexagon_repack_buffer_type_interface = { + /* .get_name = */ ggml_backend_hexagon_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_hexagon_repack_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_hexagon_buffer_type_get_alignment, + /* .get_max_size = */ ggml_backend_hexagon_buffer_type_get_max_size, + /* .get_alloc_size = */ ggml_backend_hexagon_buffer_type_get_alloc_size, + /* .is_host = */ ggml_backend_hexagon_repack_buffer_type_is_host, +}; + +void ggml_hexagon_session::allocate(int 
dev_id) noexcept(false) { + this->valid_session = false; + this->valid_handle = false; + this->valid_queue = false; + this->valid_iface = false; + + this->domain_id = 3; // Default for CDSP, updated after the session is created + this->session_id = 0; // Default for CDSP, updated after the session is created + this->dev_id = dev_id; + this->name = std::string("HTP") + std::to_string(dev_id); + + this->op_pending = 0; + this->prof_usecs = 0; + this->prof_cycles = 0; + this->prof_pkts = 0; + + GGML_LOG_INFO("ggml-hex: allocating new session: %s\n", this->name.c_str()); + + domain * my_domain = get_domain(this->domain_id); + if (my_domain == NULL) { + GGML_LOG_ERROR("ggml-hex: unable to get domain struct for CDSP\n"); + throw std::runtime_error("ggml-hex: failed to get CDSP domain (see log for details)"); + } + + // Create new session + if (dev_id != 0) { + struct remote_rpc_reserve_new_session n; + n.domain_name_len = strlen(CDSP_DOMAIN_NAME); + n.domain_name = const_cast(CDSP_DOMAIN_NAME); + n.session_name = const_cast(this->name.c_str()); + n.session_name_len = this->name.size(); + + int err = remote_session_control(FASTRPC_RESERVE_NEW_SESSION, (void *) &n, sizeof(n)); + if (err != AEE_SUCCESS) { + GGML_LOG_ERROR("ggml-hex: failed to reserve new session %d : error 0x%x\n", dev_id, err); + throw std::runtime_error("ggml-hex: remote_session_control(new-sess) failed (see log for details)"); + } + + // Save the IDs + this->session_id = n.session_id; + this->domain_id = n.effective_domain_id; + this->valid_session = true; + } + + // Get session URI + char htp_uri[256]; + sprintf(htp_uri, "file:///libggml-htp-v%u.so?htp_iface_skel_handle_invoke&_modver=1.0", opt_arch); + + char session_uri[256]; + { + struct remote_rpc_get_uri u; + u.session_id = this->session_id; + u.domain_name = const_cast(CDSP_DOMAIN_NAME); + u.domain_name_len = strlen(CDSP_DOMAIN_NAME); + u.module_uri = const_cast(htp_uri); + u.module_uri_len = strlen(htp_uri); + u.uri = session_uri; + u.uri_len = sizeof(session_uri); + + int err = remote_session_control(FASTRPC_GET_URI, (void *) &u, sizeof(u)); + if (err != AEE_SUCCESS) { + GGML_LOG_ERROR("ggml-hex: failed to get URI for session %d : error 0x%x\n", dev_id, err); + throw std::runtime_error("ggml-hex: remote_session_control(get-uri) failed (see log for details)"); + } + } + + // Enable Unsigned PD + { + struct remote_rpc_control_unsigned_module u; + u.domain = this->domain_id; + u.enable = 1; + int err = remote_session_control(DSPRPC_CONTROL_UNSIGNED_MODULE, (void *) &u, sizeof(u)); + if (err != AEE_SUCCESS) { + GGML_LOG_ERROR("ggml-hex: failed to enable unsigned PD for session %d : error 0x%x\n", dev_id, err); + throw std::runtime_error("ggml-hex: remote_session_control(unsign) failed (see log for details)"); + } + } + + // Open session + int err = htp_iface_open(session_uri, &this->handle); + if (err != AEE_SUCCESS) { + GGML_LOG_ERROR("ggml-hex: failed to open session %d : error 0x%x\n", dev_id, err); + throw std::runtime_error("ggml-hex: failed to open session (see log for details)"); + } + + this->valid_handle = true; + + GGML_LOG_INFO("ggml-hex: new session: %s : session-id %d domain-id %d uri %s handle 0x%lx\n", this->name.c_str(), + this->session_id, this->domain_id, session_uri, (unsigned long) this->handle); + + // Enable FastRPC QoS mode + { + struct remote_rpc_control_latency l; + l.enable = 1; + + int err = remote_handle64_control(this->handle, DSPRPC_CONTROL_LATENCY, (void *) &l, sizeof(l)); + if (err != 0) { + GGML_LOG_WARN("ggml-hex: failed to enable fastrpc 
QOS mode: 0x%08x\n", (unsigned) err); + } + } + + // Now let's setup the DSP queue + err = dspqueue_create(this->domain_id, + 0, // Flags + 128 * 1024, // Request queue size (in bytes) + 64 * 1024, // Response queue size (in bytes) + htp_packet_callback, htp_error_callback, + (void *) this, // Callback context + &queue); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: %s dspqueue_create failed: 0x%08x\n", this->name.c_str(), (unsigned) err); + throw std::runtime_error("ggml-hex: failed to create dspqueue (see log for details)"); + } + + this->valid_queue = true; + + // Export queue for use on the DSP + err = dspqueue_export(queue, &this->queue_id); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: dspqueue_export failed: 0x%08x\n", (unsigned) err); + throw std::runtime_error("ggml-hex: dspqueue export failed (see log for details)"); + } + + if (opt_etm) { + err = htp_iface_enable_etm(this->handle); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: failed to enable ETM tracing: 0x%08x\n", (unsigned) err); + } + } + + // Start the DSP-side service. We need to pass the queue ID to the + // DSP in a FastRPC call; the DSP side will import the queue and start + // listening for packets in a callback. + err = htp_iface_start(this->handle, dev_id, this->queue_id, opt_nhvx); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: failed to start session: 0x%08x\n", (unsigned) err); + throw std::runtime_error("ggml-hex: iface start failed (see log for details)"); + } + this->valid_iface = true; +} + +void ggml_hexagon_session::release() noexcept(true) { + GGML_LOG_INFO("ggml-hex: releasing session: %s\n", this->name.c_str()); + + int err; + + // Stop the DSP-side service and close the queue + if (this->valid_iface) { + err = htp_iface_stop(this->handle); + if (err != 0) { + GGML_ABORT("ggml-hex: htp_iface_stop failed: 0x%08x\n", (unsigned) err); + } + } + + if (opt_etm) { + err = htp_iface_disable_etm(this->handle); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: warn : failed to disable ETM tracing: 0x%08x\n", (unsigned) err); + } + } + + if (this->valid_queue) { + err = dspqueue_close(queue); + if (err != 0) { + GGML_ABORT("ggml-hex: dspqueue_close failed: 0x%08x\n", (unsigned) err); + } + } + + if (this->valid_handle) { + htp_iface_close(this->handle); + } +} + +ggml_hexagon_session::ggml_hexagon_session(int dev_id) noexcept(false) { + buffer_type.context = nullptr; + repack_buffer_type.context = nullptr; + + try { + allocate(dev_id); + + buffer_type.iface = ggml_backend_hexagon_buffer_type_interface; + buffer_type.context = new ggml_backend_hexagon_buffer_type_context(this->name, this); + + repack_buffer_type.iface = ggml_backend_hexagon_repack_buffer_type_interface; + repack_buffer_type.context = new ggml_backend_hexagon_buffer_type_context(this->name + "-REPACK", this); + } catch (std::exception const &exc) { + release(); + throw; + } +} + +ggml_hexagon_session::~ggml_hexagon_session() noexcept(true) { + release(); + + delete static_cast(buffer_type.context); + delete static_cast(repack_buffer_type.context); +} + +// ** backend interface + +static bool ggml_backend_buffer_is_hexagon(const struct ggml_backend_buffer * b) { + return b->buft->iface.get_alignment == ggml_backend_hexagon_buffer_type_get_alignment; +} + +static inline bool ggml_backend_buffer_is_hexagon_repack(const struct ggml_backend_buffer * b) { + return b->buft->iface.alloc_buffer == ggml_backend_hexagon_repack_buffer_type_alloc_buffer; +} + +static bool hex_supported_dims2(const struct ggml_tensor * x, const struct ggml_tensor * y) { + if (x->ne[0] 
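`allocate()` acquires its resources in sequence (FastRPC session, module handle, DSP queue, remote service) and tracks each with a `valid_*` flag so `release()` can tear down exactly what exists; the constructor above releases and rethrows on partial failure, and the destructor releases unconditionally. A minimal sketch of that construct-or-release pattern with illustrative names (nothing below is from the patch):

```cpp
#include <stdexcept>

struct toy_session {
    bool valid_handle = false;
    bool valid_queue  = false;

    toy_session() {
        try {
            open_handle();   // may throw
            open_queue();    // may throw after the handle exists
        } catch (...) {
            release();       // undo whatever did succeed
            throw;           // let the caller mark this device unusable
        }
    }
    ~toy_session() { release(); }

private:
    void open_handle() { valid_handle = true; }
    void open_queue()  { valid_queue  = true; }
    void release() {
        if (valid_queue)  { /* e.g. close the queue  */ valid_queue  = false; }
        if (valid_handle) { /* e.g. close the handle */ valid_handle = false; }
    }
};
```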
!= y->ne[0]) { + return false; + } + if (x->ne[1] != y->ne[1]) { + return false; + } + if (x->ne[2] != y->ne[2]) { + return false; + } + if (x->ne[3] != y->ne[3]) { + return false; + } + + return true; +} + +static bool hex_supported_src0_type(ggml_type t) { + return t == GGML_TYPE_F32; +} + +static bool hex_supported_src1_type(ggml_type t) { + return t == GGML_TYPE_F32; +} + +static bool hex_supported_src2_type(ggml_type t) { + return t == GGML_TYPE_F32; +} + +static bool hex_supported_src1_type2(ggml_type t) { + return t == GGML_TYPE_F16; +} + +static bool hex_supported_src1_type3(ggml_type t) { + return t == GGML_TYPE_I32; +} + +static bool hex_supported_dst_type(ggml_type t) { + return t == GGML_TYPE_F32; +} + +static bool hex_supported_dims(const struct ggml_tensor * x, const struct ggml_tensor * y) { + // TODO: support broadcast for ne[2 and 3] + if (x->ne[0] != y->ne[0]) { + return false; + } + if (x->ne[2] != y->ne[2]) { + return false; + } + if (x->ne[3] != y->ne[3]) { + return false; + } + return true; +} + +static bool ggml_hexagon_supported_mul_mat(const struct ggml_hexagon_session * sess, const struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + if (src1->type != GGML_TYPE_F32 || dst->type != GGML_TYPE_F32) { + return false; + } + + // TODO: add support for non-cont tensors + if (!ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + if (src0->ne[0] % 32) { + return false; + } + + if (src0->ne[1] > 16 * 1024) { + return false; // typically the lm-head which would be too large for VTCM + } + + // if ((src0->ne[2] != src1->ne[2] || src0->ne[3] != src1->ne[3])) return false; + if ((src1->ne[2] != 1 || src1->ne[3] != 1)) { + return false; + } + + // src0 (weights) must be repacked + if (src0->buffer && !ggml_backend_buffer_is_hexagon_repack(src0->buffer)) { + return false; + } + break; + + case GGML_TYPE_F16: + if (!opt_experimental) { + return false; + } + break; + + default: + return false; + } + + // src0 & src1 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_mul_mat_id(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + if (src1->type != GGML_TYPE_F32 || dst->type != GGML_TYPE_F32 || src2->type != GGML_TYPE_I32) { + return false; + } + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + if ((src0->ne[0] % 32)) { + return false; + } + + // src0 (weights) must be repacked + if (src0->buffer && !ggml_backend_buffer_is_hexagon_repack(src0->buffer)) { + return false; + } + break; + + case GGML_TYPE_F16: + if (!opt_experimental) { + return false; + } + break; + + default: + return false; + } + + // TODO: add support for non-cont 
tensors + if (!ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + + // src0 (weights) must be repacked and mapped to the same session + // src1 & sr2 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (src2->buffer && + (!ggml_backend_buffer_is_hexagon(src2->buffer) || ggml_backend_hexagon_buffer_get_sess(src2->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_binary(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * dst = op; + + if (!hex_supported_src0_type(src0->type)) { + return false; + } + if (!hex_supported_src1_type(src1->type)) { + return false; + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + if (!hex_supported_dims2(src0, dst)) { + return false; + } + if (!ggml_can_repeat(src1, src0)) { + return false; + } + + // TODO: add support for non-contigiuos tensors + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + + // src0, src1 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_add_id(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + if (!hex_supported_src0_type(src0->type)) { + return false; + } + if (!hex_supported_src1_type(src1->type)) { + return false; + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + if (!hex_supported_dims2(src0, dst)) { + return false; + } + + // REVISIT: add support for non-contigiuos tensors + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + + // src0, src1 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (src2->buffer && + (!ggml_backend_buffer_is_hexagon(src2->buffer) || ggml_backend_hexagon_buffer_get_sess(src2->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; 
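The `ggml_hexagon_supported_*` predicates combine shape/type constraints with two placement rules: quantized weights must live in the repack buffer type, and every source and the destination must be mapped to the same Hexagon session. A hedged sketch of probing just the shape/type part through the public device API; with `no_alloc` tensors the buffers are null, so the placement checks above are skipped, and the helper name and sizes are illustrative:

```cpp
#include "ggml.h"
#include "ggml-backend.h"

// Returns whether the device reports support for a Q4_0 x F32 matmul of the
// given shape. Tensors are created without buffers, so only the type/shape
// rules in ggml_hexagon_supported_mul_mat are exercised here.
static bool probe_q4_mul_mat(ggml_backend_dev_t dev, int64_t k, int64_t n, int64_t m) {
    ggml_init_params params = { /*mem_size=*/ 16u * 1024 * 1024, /*mem_buffer=*/ nullptr, /*no_alloc=*/ true };
    ggml_context * ctx = ggml_init(params);

    ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, k, n); // weights
    ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32,  k, m); // activations
    ggml_tensor * y = ggml_mul_mat(ctx, w, x);

    const bool ok = ggml_backend_dev_supports_op(dev, y);
    ggml_free(ctx);
    return ok;
}
```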
+ } + + return true; +} + +static bool ggml_hexagon_supported_unary(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * dst = op; + + if (!hex_supported_src0_type(src0->type)) { + return false; + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + if (!hex_supported_dims2(src0, dst)) { + return false; + } + + // TODO: add support for non-contigiuos tensors + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { + return false; + } + + // src0 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_activations(const struct ggml_hexagon_session * sess, + const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * dst = op; + + if (!hex_supported_src0_type(src0->type)) { + return false; + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { + return false; + } + + if (src1) { + if (!hex_supported_src1_type(src1->type)) { + return false; + } + if (!hex_supported_dims2(src0, src1)) { + return false; + } + if (!ggml_is_contiguous(src1)) { + return false; + } + } + + // src0, src1 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1 && src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_softmax(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + if (src2) { + return false; // FIXME: add support for sinks + } + + if (!hex_supported_src0_type(src0->type)) { + return false; + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + + if (src1) { + if (!hex_supported_src1_type(src1->type) && !hex_supported_src1_type2(src1->type)) { + return false; + } + if (src0->ne[0] != src1->ne[0]) { + return false; + } + if (src1->ne[1] < src0->ne[1]) { + return false; + } + if (src0->ne[2] % src1->ne[2] != 0) { + return false; + } + if (src0->ne[3] % src1->ne[3] != 0) { + return false; + } + } + + if (src1) { + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + } else { + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(dst)) { + return false; + } + } + + // src0, src1 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1 && src1->buffer && + 
(!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +static bool ggml_hexagon_supported_rope(const struct ggml_hexagon_session * sess, const struct ggml_tensor * op) { + const int32_t * op_params = &op->op_params[0]; + + int mode = op_params[2]; + + if ((mode & GGML_ROPE_TYPE_NEOX) || (mode & GGML_ROPE_TYPE_MROPE) || (mode & GGML_ROPE_TYPE_VISION)) { + return false; + } + if (mode & 1) { + return false; + } + + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + if (!hex_supported_src0_type(src0->type)) { + return false; // FIXME: add support for GGML_TYPE_F16 for src0 + } + if (!hex_supported_dst_type(dst->type)) { + return false; + } + if (!hex_supported_src1_type3(src1->type)) { + return false; + } + if (src2) { + if (!hex_supported_src2_type(src2->type)) { + return false; + } + int n_dims = op_params[1]; + if (src2->ne[0] < (n_dims / 2)) { + return false; + } + } + + if (src2) { + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(src2) || + !ggml_is_contiguous(dst)) { + return false; + } + } else { + if (!ggml_is_contiguous(src0) || !ggml_is_contiguous(src1) || !ggml_is_contiguous(dst)) { + return false; + } + } + + // src0, src1, src2 & dst must be mapped to the same session + if (src0->buffer && + (!ggml_backend_buffer_is_hexagon(src0->buffer) || ggml_backend_hexagon_buffer_get_sess(src0->buffer) != sess)) { + return false; + } + if (src1->buffer && + (!ggml_backend_buffer_is_hexagon(src1->buffer) || ggml_backend_hexagon_buffer_get_sess(src1->buffer) != sess)) { + return false; + } + if (src2 && src2->buffer && + (!ggml_backend_buffer_is_hexagon(src2->buffer) || ggml_backend_hexagon_buffer_get_sess(src2->buffer) != sess)) { + return false; + } + if (dst->buffer && + (!ggml_backend_buffer_is_hexagon(dst->buffer) || ggml_backend_hexagon_buffer_get_sess(dst->buffer) != sess)) { + return false; + } + + return true; +} + +// Init hexagon tensor from GGML tensor and Hexagon buffer +static void init_htp_tensor(htp_tensor * h, const ggml_tensor * t) { + h->data = 0; // updated by the receiver + h->type = t->type; + h->ne[0] = t->ne[0]; + h->ne[1] = t->ne[1]; + h->ne[2] = t->ne[2]; + h->ne[3] = t->ne[3]; + h->nb[0] = t->nb[0]; + h->nb[1] = t->nb[1]; + h->nb[2] = t->nb[2]; + h->nb[3] = t->nb[3]; +} + +static void hex_dump_dspbuf(const struct ggml_tensor * t, const dspqueue_buffer * d) { + auto buf = static_cast(t->buffer->context); + auto sess = buf->sess; + + HEX_VERBOSE("ggml-hex: %s dspqbuf : %s base-addr %p base-size %zu data %p offset %u size %u\n", sess->name.c_str(), + t->name, (void *) buf->base, buf->size, (void *) d->ptr, (unsigned int) d->offset, + (unsigned int) d->size); +} + +static void ggml_hexagon_mul_mat(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * dst = op; + + auto src0_buf = static_cast(src0->buffer->context); + auto src1_buf = static_cast(src1->buffer->context); + auto dst_buf = static_cast(dst->buffer->context); + + uint64_t t1, t2; + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + req.op = 
HTP_OP_MUL_MAT; + req.flags = flags; + + init_htp_tensor(&req.src0, src0); + init_htp_tensor(&req.src1, src1); + init_htp_tensor(&req.dst, dst); + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + dspqueue_buffer bufs[3]; + memset(bufs, 0, sizeof(bufs)); + + // First buffer Weights. + // The content is static, there is no need to do any cache management + bufs[0].fd = src0_buf->fd; + bufs[0].ptr = src0->data; + bufs[0].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[0].size = ggml_nbytes(src0); + bufs[0].flags = DSPQUEUE_BUFFER_FLAG_REF; + + // Second buffer Input Activations. This is a buffer that the CPU + // writes and the DSP reads, so we'll need to flush CPU caches and + // invalidate DSP ones. On platforms with I/O coherency support the + // framework will automatically skip cache operations where possible. + bufs[1].fd = src1_buf->fd; + bufs[1].ptr = src1->data; + bufs[1].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[1].size = ggml_nbytes(src1); + bufs[1].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Third buffer Output Activations. We'll handle DSP + // cache maintenance in the response message but need to flush + // CPU caches to ensure any previously written dirty lines are + // written out before writes from the DSP start. + bufs[2].fd = dst_buf->fd; + bufs[2].ptr = dst->data; + bufs[2].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[2].size = ggml_nbytes(dst); + bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + + // Primary DSP session from the src0 (normally weight) tensor + auto sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[64 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s: %s : %s : %s : %s : %s: flags 0x%x\n", sess->name.c_str(), ggml_op_name(op->op), + names, dims, types, strides, buffs, req.flags); + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + hex_dump_dspbuf(src1, &bufs[1]); + hex_dump_dspbuf(dst, &bufs[2]); + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + 3, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000 // Timeout + ); + + if (err != 0) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u (%f) " + "call-usec %llu\n", + sess->name.c_str(), ggml_op_name(op->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], 
(uint32_t) src1->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); +} + +static void ggml_hexagon_mul_mat_id(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + auto src0_buf = static_cast(src0->buffer->context); + auto src1_buf = static_cast(src1->buffer->context); + auto src2_buf = static_cast(src2->buffer->context); + auto dst_buf = static_cast(dst->buffer->context); + + uint64_t t1, t2; + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + req.op = HTP_OP_MUL_MAT_ID; + req.flags = flags; + + init_htp_tensor(&req.src0, src0); + init_htp_tensor(&req.src1, src1); + init_htp_tensor(&req.src2, src2); + init_htp_tensor(&req.dst, dst); + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + dspqueue_buffer bufs[4]; + memset(bufs, 0, sizeof(bufs)); + + // First buffer Weights. + // The content is static, there is no need to do any cache management + bufs[0].fd = src0_buf->fd; + bufs[0].ptr = src0->data; + bufs[0].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[0].size = ggml_nbytes(src0); + bufs[0].flags = DSPQUEUE_BUFFER_FLAG_REF; + + // Second buffer Input Activations. This is a buffer that the CPU + // writes and the DSP reads, so we'll need to flush CPU caches and + // invalidate DSP ones. On platforms with I/O coherency support the + // framework will automatically skip cache operations where possible. + bufs[1].fd = src1_buf->fd; + bufs[1].ptr = src1->data; + bufs[1].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[1].size = ggml_nbytes(src1); + bufs[1].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Third buffer expert IDs. This is a buffer that the CPU + // writes and the DSP reads, so we'll need to flush CPU caches and + // invalidate DSP ones. On platforms with I/O coherency support the + // framework will automatically skip cache operations where possible. + bufs[2].fd = src2_buf->fd; + bufs[2].ptr = src2->data; + bufs[2].offset = (uint8_t *) src2->data - src2_buf->base; + bufs[2].size = ggml_nbytes(src2); + bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Forth buffer Output Activations. We'll handle DSP + // cache maintenance in the response message but need to flush + // CPU caches to ensure any previously written dirty lines are + // written out before writes from the DSP start. 
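Each dispatch path repeats the buffer-descriptor recipe spelled out in the comments above: every descriptor takes a reference, CPU-written inputs are flushed on the sender and invalidated on the recipient, outputs only need the sender-side flush (their DSP-side maintenance rides on the response message), and static weights skip cache maintenance entirely. A condensed sketch of that recipe, assuming the `dspqueue_buffer` fields and the `ggml_backend_hexagon_buffer_context` layout used in this file; it is not a drop-in, since weights in the patch use only the REF flag:

```cpp
// Fill one descriptor the way the MUL_MAT / MUL_MAT_ID paths above do.
static void hex_fill_dspbuf(dspqueue_buffer * d,
                            const ggml_tensor * t,
                            const ggml_backend_hexagon_buffer_context * buf,
                            bool dsp_reads_it) {
    memset(d, 0, sizeof(*d));
    d->fd     = buf->fd;
    d->ptr    = t->data;
    d->offset = (uint8_t *) t->data - buf->base;   // offset inside the fd-backed buffer
    d->size   = ggml_nbytes(t);
    d->flags  = DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER;
    if (dsp_reads_it) {
        d->flags |= DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT;
    }
}
```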
+ bufs[3].fd = dst_buf->fd; + bufs[3].ptr = dst->data; + bufs[3].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[3].size = ggml_nbytes(dst); + bufs[3].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + + // Primary DSP session from the src0 (normally weight) tensor + auto sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[64 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s: %s : %s : %s : %s : %s: flags 0x%x\n", sess->name.c_str(), ggml_op_name(op->op), + names, dims, types, strides, buffs, req.flags); + + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + hex_dump_dspbuf(src1, &bufs[1]); + hex_dump_dspbuf(src2, &bufs[2]); + hex_dump_dspbuf(dst, &bufs[3]); + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + 4, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000 // Timeout + ); + + if (err != 0) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + HEX_PROFILE( + "ggml-hex: %s matmul-id %s %u:%u:%u:%u x %s %u:%u:%u:%u (%s %u:%u:%u:%u) -> %s %u:%u:%u:%u : op-usec %u " + "op-cycles %u op-pkts %u (%f) call-usec %llu\n", + sess->name.c_str(), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], (uint32_t) src0->ne[2], + (uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], (uint32_t) src1->ne[2], + (uint32_t) src1->ne[3], src2->name, (uint32_t) src2->ne[0], (uint32_t) src2->ne[1], (uint32_t) src2->ne[2], + (uint32_t) src2->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], + (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); +} + +static void ggml_hexagon_binary(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * node = op; + const struct ggml_tensor * src0 = node->src[0]; + const struct ggml_tensor * src1 = node->src[1]; + const struct ggml_tensor * dst = node; + + auto src0_buf = static_cast(src0->buffer->context); + auto src1_buf = static_cast(src1->buffer->context); + auto dst_buf = static_cast(dst->buffer->context); + + uint64_t t1 = 0; + uint64_t t2 = 0; + + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + req.flags = flags; + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + switch (node->op) { + case GGML_OP_MUL: + req.op = HTP_OP_MUL; + break; + case GGML_OP_ADD: + req.op = HTP_OP_ADD; + break; + case GGML_OP_SUB: + req.op = HTP_OP_SUB; + break; + default: + GGML_ABORT("ggml-hex: binary : unsupported op:%d\n", node->op); + } + + init_htp_tensor(&req.src0, src0); + init_htp_tensor(&req.src1, src1); + init_htp_tensor(&req.dst, dst); + + dspqueue_buffer bufs[3]; + memset(bufs, 0, 
sizeof(bufs)); + + // First buffer = First Operand of Binary op + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. + bufs[0].fd = src0_buf->fd; + bufs[0].ptr = src0->data; + bufs[0].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[0].size = ggml_nbytes(src0); + bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP; + + // Second buffer = Second Operand of Binary op + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. + bufs[1].fd = src1_buf->fd; + bufs[1].ptr = src1->data; + bufs[1].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[1].size = ggml_nbytes(src1); + bufs[1].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Third buffer = Output Activations. We'll handle DSP + // cache maintenance in the response message but need to flush + // CPU caches to ensure any previously written dirty lines are + // written out before writes from the DSP start. + bufs[2].fd = dst_buf->fd; + bufs[2].ptr = dst->data; + bufs[2].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[2].size = ggml_nbytes(dst); + bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + + // Primary DSP session from the src0 tensor + ggml_hexagon_session * sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[16 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s : %s : %s : %s : %s : %s : flags 0x%x\n", sess->name.c_str(), + ggml_op_name(node->op), names, dims, types, strides, buffs, req.flags); + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + hex_dump_dspbuf(src1, &bufs[1]); + hex_dump_dspbuf(dst, &bufs[2]); + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + 3, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000); // Timeout + + if (0 != err) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u (%f) " + "call-usec %llu\n", + sess->name.c_str(), ggml_op_name(node->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], (uint32_t) src1->ne[3], dst->name, (uint32_t) dst->ne[0], 
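Every dispatch bumps `sess->op_pending` before `dspqueue_write()` and the response callback decrements it; `opt_opsync` and the synchronize/graph-compute paths simply spin until the counter drains. A minimal sketch of that completion tracking, assuming an atomic counter (the actual type of `op_pending` is defined elsewhere in this file):

```cpp
#include <atomic>
#include <cstdint>

// Submit/complete/wait protocol used around dspqueue_write() above.
struct pending_ops {
    std::atomic<uint32_t> count{0};

    void submitted() { count.fetch_add(1, std::memory_order_acq_rel); }  // before dspqueue_write
    void completed() { count.fetch_sub(1, std::memory_order_acq_rel); }  // from the response callback
    void wait_idle() const {
        while (count.load(std::memory_order_acquire) != 0) {
            // busy-wait, mirroring the `while (sess->op_pending) ;` loops above
        }
    }
};
```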
(uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); +} + +static void ggml_hexagon_add_id(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * node = op; + const struct ggml_tensor * src0 = node->src[0]; + const struct ggml_tensor * src1 = node->src[1]; + const struct ggml_tensor * src2 = node->src[2]; + const struct ggml_tensor * dst = node; + + auto src0_buf = static_cast(src0->buffer->context); + auto src1_buf = static_cast(src1->buffer->context); + auto src2_buf = static_cast(src2->buffer->context); + auto dst_buf = static_cast(dst->buffer->context); + + uint64_t t1 = 0; + uint64_t t2 = 0; + + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + req.flags = flags; + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + switch (node->op) { + case GGML_OP_ADD_ID: + req.op = HTP_OP_ADD_ID; + break; + default: + GGML_ABORT("ggml-hex: unsupported op:%d\n", node->op); + } + + init_htp_tensor(&req.src0, src0); + init_htp_tensor(&req.src1, src1); + init_htp_tensor(&req.src2, src2); + init_htp_tensor(&req.dst, dst); + + dspqueue_buffer bufs[4]; + memset(bufs, 0, sizeof(bufs)); + + // First buffer = input activations + bufs[0].fd = src0_buf->fd; + bufs[0].ptr = src0->data; + bufs[0].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[0].size = ggml_nbytes(src0); + bufs[0].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP; + + // Second buffer = experts bias + bufs[1].fd = src1_buf->fd; + bufs[1].ptr = src1->data; + bufs[1].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[1].size = ggml_nbytes(src1); + bufs[1].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Third buffer = activated experts + bufs[2].fd = src2_buf->fd; + bufs[2].ptr = src2->data; + bufs[2].offset = (uint8_t *) src2->data - src2_buf->base; + bufs[2].size = ggml_nbytes(src2); + bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + + // Forth buffer = output activations + bufs[3].fd = dst_buf->fd; + bufs[3].ptr = dst->data; + bufs[3].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[3].size = ggml_nbytes(dst); + bufs[3].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + + // Primary DSP session from the src0 tensor + ggml_hexagon_session * sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[16 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s : %s : %s : %s : %s : %s : flags 0x%x\n", sess->name.c_str(), + ggml_op_name(node->op), names, dims, types, strides, buffs, req.flags); + + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + hex_dump_dspbuf(src1, 
&bufs[1]); + hex_dump_dspbuf(src2, &bufs[2]); + hex_dump_dspbuf(dst, &bufs[3]); + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + 4, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000); // Timeout + + if (0 != err) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u (%f) " + "call-usec %llu\n", + sess->name.c_str(), ggml_op_name(node->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], (uint32_t) src1->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); +} + +static void ggml_hexagon_unary(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * dst = op; + + uint64_t t1 = 0; + uint64_t t2 = 0; + + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + + memset(&req, 0, sizeof(htp_general_req)); + memcpy(&req.op_params, &op->op_params, sizeof(op->op_params)); + req.flags = flags; + + bool supported = false; + + switch (op->op) { + case GGML_OP_RMS_NORM: + req.op = HTP_OP_RMS_NORM; + supported = true; + break; + + case GGML_OP_UNARY: + if (ggml_get_unary_op(dst) == GGML_UNARY_OP_SILU) { + req.op = HTP_OP_UNARY_SILU; + supported = true; + } + break; + + case GGML_OP_GLU: + if (ggml_get_glu_op(dst) == GGML_GLU_OP_SWIGLU) { + req.op = HTP_OP_GLU_SWIGLU; + supported = true; + } else if (ggml_get_glu_op(dst) == GGML_GLU_OP_SWIGLU_OAI) { + req.op = HTP_OP_GLU_SWIGLU_OAI; + supported = true; + } + break; + + case GGML_OP_SOFT_MAX: + req.op = HTP_OP_SOFTMAX; + supported = true; + + default: + break; + } + + if (!supported) { + GGML_ABORT("ggml-hex: unary : unsupported op:%d\n", op->op); + } + + init_htp_tensor(&req.dst, dst); + init_htp_tensor(&req.src0, src0); + if (src1) { + init_htp_tensor(&req.src1, src1); + } + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + dspqueue_buffer bufs[3]; + int n_bufs = 0; + + memset(bufs, 0, sizeof(bufs)); + + // First buffer = Only Operand of Unary op + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. 
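The unary/activation path funnels several ggml ops into one HTP request and picks the opcode with the switch above; anything that falls through is a hard abort, which is safe only because `supports_op` has already filtered the graph. A compact restatement of that mapping as a sketch (the helper name is illustrative; the HTP_OP_* values and `htp_general_req` come from this patch):

```cpp
// Map a ggml node onto the HTP opcode the unary path above understands.
// Returns false for anything the current DSP kernel set does not cover.
static bool htp_pick_unary_op(const ggml_tensor * t, htp_general_req & req) {
    switch (t->op) {
        case GGML_OP_RMS_NORM: req.op = HTP_OP_RMS_NORM; return true;
        case GGML_OP_SOFT_MAX: req.op = HTP_OP_SOFTMAX;  return true;
        case GGML_OP_UNARY:
            if (ggml_get_unary_op(t) == GGML_UNARY_OP_SILU) { req.op = HTP_OP_UNARY_SILU; return true; }
            return false;
        case GGML_OP_GLU:
            if (ggml_get_glu_op(t) == GGML_GLU_OP_SWIGLU)     { req.op = HTP_OP_GLU_SWIGLU;     return true; }
            if (ggml_get_glu_op(t) == GGML_GLU_OP_SWIGLU_OAI) { req.op = HTP_OP_GLU_SWIGLU_OAI; return true; }
            return false;
        default:
            return false;
    }
}
```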
+ auto src0_buf = static_cast(src0->buffer->context); + bufs[n_bufs].fd = src0_buf->fd; + bufs[n_bufs].ptr = src0->data; + bufs[n_bufs].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[n_bufs].size = ggml_nbytes(src0); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP; + ++n_bufs; + + if (src1) { + // Second buffer = Second Operand of Binary op + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. + auto src1_buf = static_cast(src1->buffer->context); + bufs[n_bufs].fd = src1_buf->fd; + bufs[n_bufs].ptr = src1->data; + bufs[n_bufs].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[n_bufs].size = ggml_nbytes(src1); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + ++n_bufs; + } + + // Second or third buffer = Output Activations. We'll handle DSP + // Second buffer = Output Activations. We'll handle DSP + // cache maintenance in the response message but need to flush + // CPU caches to ensure any previously written dirty lines are + // written out before writes from the DSP start. + auto dst_buf = static_cast(dst->buffer->context); + bufs[n_bufs].fd = dst_buf->fd; + bufs[n_bufs].ptr = dst->data; + bufs[n_bufs].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[n_bufs].size = ggml_nbytes(dst); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + ++n_bufs; + + // Primary DSP session from the src0 tensor + ggml_hexagon_session * sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[64 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s : %s : %s : %s : %s : %s : flags 0x%x\n", sess->name.c_str(), ggml_op_name(op->op), + names, dims, types, strides, buffs, req.flags); + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + if (src1) { + hex_dump_dspbuf(src1, &bufs[1]); + hex_dump_dspbuf(dst, &bufs[2]); + } else { + hex_dump_dspbuf(dst, &bufs[1]); + } + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + n_bufs, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000); // Timeout + + if (0 != err) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + if (src1) { + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u " + "(%f) call-usec %llu\n", + sess->name.c_str(), ggml_op_name(op->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], src1->name, (uint32_t) 
src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], (uint32_t) src1->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); + } else { + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u (%f) call-usec " + "%llu\n", + sess->name.c_str(), ggml_op_name(op->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); + } +} + +static void ggml_hexagon_rope(const struct ggml_tensor * op, uint32_t flags) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + const struct ggml_tensor * src2 = op->src[2]; + const struct ggml_tensor * dst = op; + + uint64_t t1 = 0; + uint64_t t2 = 0; + + t1 = ggml_time_us(); + + // Construct HTP message + htp_general_req req; + + memset(&req, 0, sizeof(htp_general_req)); + memcpy(&req.op_params, &op->op_params, sizeof(op->op_params)); + req.flags = flags; + req.op = HTP_OP_ROPE; + + init_htp_tensor(&req.dst, dst); + init_htp_tensor(&req.src0, src0); + init_htp_tensor(&req.src1, src1); + if (src2) { + init_htp_tensor(&req.src2, src2); + } + + // Use opmask to override flags + if (!(opt_opmask & HTP_OPMASK_QUANTIZE)) { + req.flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + if (!(opt_opmask & HTP_OPMASK_COMPUTE)) { + req.flags |= HTP_OPFLAGS_SKIP_COMPUTE; + } + + dspqueue_buffer bufs[4]; + int n_bufs = 0; + + memset(bufs, 0, sizeof(bufs)); + + // First buffer + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. + auto src0_buf = static_cast(src0->buffer->context); + bufs[n_bufs].fd = src0_buf->fd; + bufs[n_bufs].ptr = src0->data; + bufs[n_bufs].offset = (uint8_t *) src0->data - src0_buf->base; + bufs[n_bufs].size = ggml_nbytes(src0); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP; + ++n_bufs; + + // Second buffer + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. + auto src1_buf = static_cast(src1->buffer->context); + bufs[n_bufs].fd = src1_buf->fd; + bufs[n_bufs].ptr = src1->data; + bufs[n_bufs].offset = (uint8_t *) src1->data - src1_buf->base; + bufs[n_bufs].size = ggml_nbytes(src1); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + ++n_bufs; + + if (src2) { + // Third buffer + // This is a buffer that the CPU writes and the DSP reads, so we'll + // need to flush CPU caches and invalidate DSP ones. On platforms + // with I/O coherency support the framework will automatically skip + // cache operations where possible. 
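ROPE requests forward `op_params` verbatim (`op_params[1]` is `n_dims`, `op_params[2]` the mode, matching `ggml_hexagon_supported_rope` earlier in this file), and only the plain rope layout is accepted for now. A small sketch of that mode filter, using the public `GGML_ROPE_TYPE_*` flags:

```cpp
// Mirror of the mode checks in ggml_hexagon_supported_rope: NEOX, M-RoPE and
// vision variants are rejected, and so are odd mode values.
static bool htp_rope_mode_ok(int32_t mode) {
    if (mode & (GGML_ROPE_TYPE_NEOX | GGML_ROPE_TYPE_MROPE | GGML_ROPE_TYPE_VISION)) {
        return false;
    }
    return (mode & 1) == 0;
}
```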
+ auto src2_buf = static_cast(src2->buffer->context); + bufs[n_bufs].fd = src2_buf->fd; + bufs[n_bufs].ptr = src2->data; + bufs[n_bufs].offset = (uint8_t *) src2->data - src2_buf->base; + bufs[n_bufs].size = ggml_nbytes(src2); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | // Take a reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush CPU + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate DSP + ++n_bufs; + } + + // Final buffer = Output Activations. We'll handle DSP + // Second buffer = Output Activations. We'll handle DSP + // cache maintenance in the response message but need to flush + // CPU caches to ensure any previously written dirty lines are + // written out before writes from the DSP start. + auto dst_buf = static_cast(dst->buffer->context); + bufs[n_bufs].fd = dst_buf->fd; + bufs[n_bufs].ptr = dst->data; + bufs[n_bufs].offset = (uint8_t *) dst->data - dst_buf->base; + bufs[n_bufs].size = ggml_nbytes(dst); + bufs[n_bufs].flags = (DSPQUEUE_BUFFER_FLAG_REF | DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER); + ++n_bufs; + + // Primary DSP session from the src0 tensor + ggml_hexagon_session * sess = src0_buf->sess; + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[64 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s %s : %s : %s : %s : %s : %s : flags 0x%x\n", sess->name.c_str(), ggml_op_name(op->op), + names, dims, types, strides, buffs, req.flags); + if (opt_verbose > 1) { + hex_dump_dspbuf(src0, &bufs[0]); + if (src1) { + hex_dump_dspbuf(src1, &bufs[1]); + hex_dump_dspbuf(dst, &bufs[2]); + } else { + hex_dump_dspbuf(dst, &bufs[1]); + } + } + } + + if ((opt_opmask & HTP_OPMASK_QUEUE)) { + // Bump pending flag (cleared in the callback once we get the responce) + sess->op_pending++; // atomic inc + + int err = dspqueue_write(sess->queue, + 0, // flags - the framework will autoset this + n_bufs, // number of buffers + bufs, // buffer references + sizeof(req), + (const uint8_t *) &req, // Message + 1000000); // Timeout + + if (0 != err) { + GGML_ABORT("ggml-hex: %s dspqueue_write failed: 0x%08x\n", sess->name.c_str(), (unsigned) err); + } + } + + if (opt_opsync) { + while (sess->op_pending) { + ; + } + } + + t2 = ggml_time_us(); + + if (src2) { + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles " + "%u op-pkts %u (%f) call-usec %llu\n", + sess->name.c_str(), ggml_op_name(op->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], (uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], (uint32_t) src1->ne[3], src2->name, (uint32_t) src2->ne[0], (uint32_t) src2->ne[1], + (uint32_t) src2->ne[2], (uint32_t) src2->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); + } else { + HEX_PROFILE( + "ggml-hex: %s %s %s %u:%u:%u:%u x %s %u:%u:%u:%u -> %s %u:%u:%u:%u : op-usec %u op-cycles %u op-pkts %u " + "(%f) call-usec %llu\n", + sess->name.c_str(), ggml_op_name(op->op), src0->name, (uint32_t) src0->ne[0], (uint32_t) src0->ne[1], + (uint32_t) src0->ne[2], 
(uint32_t) src0->ne[3], src1->name, (uint32_t) src1->ne[0], (uint32_t) src1->ne[1], + (uint32_t) src1->ne[2], (uint32_t) src1->ne[3], dst->name, (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], + (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], sess->prof_usecs, sess->prof_cycles, sess->prof_pkts, + (float) sess->prof_cycles / sess->prof_pkts, (unsigned long long) t2 - t1); + } +} + +static const char * ggml_backend_hexagon_name(ggml_backend_t backend) { + auto sess = static_cast(backend->context); + return sess->name.c_str(); +} + +static void ggml_backend_hexagon_free(ggml_backend_t backend) { + // we just need to delete the backend here + // the sessions are allocated & freed as part of the registry + delete backend; +} + +static inline bool op_reuse_src1(const ggml_tensor * op1, const ggml_tensor * op0) { + return (op0 && op0->src[1] == op1->src[1]); +} + +// scan the graph and figure out last compute op index +static inline int last_compute_op(ggml_cgraph * graph) { + int last; + for (int i = 0; i < graph->n_nodes; ++i) { + ggml_tensor * node = graph->nodes[i]; + + switch (node->op) { + case GGML_OP_MUL_MAT: + case GGML_OP_MUL_MAT_ID: + case GGML_OP_MUL: + case GGML_OP_ADD: + case GGML_OP_SUB: + case GGML_OP_RMS_NORM: + case GGML_OP_GLU: + case GGML_OP_ADD_ID: + last = i; + break; + + default: + break; + } + } + + return last; +} + +static ggml_status ggml_backend_hexagon_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) { + auto sess = static_cast(backend->context); + + HEX_VERBOSE("ggml-hex: %s graph-compute n_nodes %d\n", sess->name.c_str(), graph->n_nodes); + + const int last = last_compute_op(graph); + + const struct ggml_tensor * prev_quant_op = nullptr; // prev executed op with quantizer + + for (int i = 0; i < graph->n_nodes; ++i) { + ggml_tensor * node = graph->nodes[i]; + + uint32_t flags = 0; + + // skip quantizer if src1 is reused + if (op_reuse_src1(node, prev_quant_op)) { + flags |= HTP_OPFLAGS_SKIP_QUANTIZE; + } + + // ask for early notification for the last Op + if (i == last) { + flags |= HTP_OPFLAGS_EARLY_WAKEUP; + } + + switch (node->op) { + case GGML_OP_MUL_MAT: + ggml_hexagon_mul_mat(node, flags); + prev_quant_op = node; + break; + case GGML_OP_MUL_MAT_ID: + ggml_hexagon_mul_mat_id(node, flags); + prev_quant_op = node; + break; + case GGML_OP_MUL: + case GGML_OP_ADD: + case GGML_OP_SUB: + ggml_hexagon_binary(node, flags); + break; + case GGML_OP_ADD_ID: + ggml_hexagon_add_id(node, flags); + break; + case GGML_OP_RMS_NORM: + ggml_hexagon_unary(node, flags); + break; + case GGML_OP_UNARY: + if (ggml_get_unary_op(node) == GGML_UNARY_OP_SILU) { + ggml_hexagon_unary(node, flags); + } + break; + case GGML_OP_GLU: + if ((ggml_get_glu_op(node) == GGML_GLU_OP_SWIGLU) || + (ggml_get_glu_op(node) == GGML_GLU_OP_SWIGLU_OAI)) { + ggml_hexagon_unary(node, flags); + } + break; + case GGML_OP_SOFT_MAX: + ggml_hexagon_unary(node, flags); + break; + + case GGML_OP_ROPE: + ggml_hexagon_rope(node, flags); + break; + + // non-compute ops + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + break; + + default: + GGML_ABORT("\nggml-hex: graph-compute %s is not supported\n", ggml_op_desc(node)); + } + } + + // Wait until all pending ops complete + while (sess->op_pending) { + ; + } + + return GGML_STATUS_SUCCESS; +} + +static void ggml_backend_hexagon_synchronize(ggml_backend_t backend) { + auto sess = static_cast(backend->context); + + HEX_VERBOSE("ggml-hex: %s synchronize\n", sess->name.c_str()); + + // Wait until all 
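graph_compute walks the nodes once and decides two per-node flags: skip the dynamic quantization of src1 when the previously executed quantizing op already pushed the same src1, and request an early wakeup for the last compute op so the host resumes as soon as that result lands. A minimal sketch of that flag selection, using the HTP_OPFLAGS_* bits from this patch:

```cpp
// Per-node flag selection as done in ggml_backend_hexagon_graph_compute above.
static uint32_t htp_node_flags(const ggml_tensor * node,
                               const ggml_tensor * prev_quant_op,
                               bool is_last_compute_op) {
    uint32_t flags = 0;
    if (prev_quant_op && prev_quant_op->src[1] == node->src[1]) {
        flags |= HTP_OPFLAGS_SKIP_QUANTIZE;   // quantized src1 already resident on the DSP
    }
    if (is_last_compute_op) {
        flags |= HTP_OPFLAGS_EARLY_WAKEUP;    // wake the host as soon as this op completes
    }
    return flags;
}
```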
pending ops complete + while (sess->op_pending) { + ; + } +} + +struct node_info { + ggml_tensor * node; + + std::vector fused; + + ggml_op op() const { + return node->op; + } + + const ggml_tensor * dst() const { + return fused.empty() ? node : fused.back(); + } + + const ggml_tensor * src0() const { + return node->src[0]; + } + + const ggml_tensor * src1() const { + return node->src[1]; + } + + bool is_empty() const { + return ggml_op_is_empty(node->op); + } + + void add_fused(ggml_tensor * t) { + fused.push_back(t); + } + + bool stackable() const { + switch (this->op()) { + case GGML_OP_MUL_MAT: + case GGML_OP_MUL_MAT_ID: + return ggml_is_quantized(this->src0()->type); + default: + return false; + } + } + + bool same_input(const node_info& n) const { + return n.src1() == this->src1(); + } +}; + +static std::vector ggml_hexagon_graph_optimize_reorder(const std::vector & nodes) { + const int n = nodes.size(); + + std::vector res; + res.reserve(n); + + std::vector used(n, false); + + // The main goal here is to stack the MUL_MAT ops with the same src1 input. + // This allows use to reuse dynamically quantized src1 in VTCM. + + // TODO: the current version might do incorrect reodering in cases where quantized src0 + // input is an output of another Op. + + for (int i0 = 0; i0 < n; i0++) { + if (used[i0]) { + continue; + } + + res.push_back(i0); + + const auto & node0 = nodes[i0]; + + if (!node0.stackable()) { + continue; + } + + // that many nodes forward to search for stackable nodes that can reuse VTCM + constexpr int N_FORWARD = 8; + + for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) { + if (used[i1]) { + continue; + } + + const auto & node1 = nodes[i1]; + + if (node1.stackable() && node1.same_input(node0)) { + res.push_back(i1); + used[i1] = true; + } + } + } + + return res; +} + +static void ggml_backend_hexagon_graph_optimize(ggml_backend_t backend, ggml_cgraph * gf) { + const int n = gf->n_nodes; + + constexpr int MAX_FUSE = 16; + + enum ggml_op ops[MAX_FUSE]; + + std::vector nodes; + nodes.reserve(gf->n_nodes); + + // fuse nodes: + // we don't want to make reorders that break fusing, so we first pack all fusable tensors + // and perform the reorder over the fused nodes. 
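The reorder pass above pulls quantized MUL_MAT / MUL_MAT_ID nodes that share the same src1 next to each other (searching up to eight nodes ahead) so the dynamically quantized src1 can be reused from VTCM. A self-contained toy with illustrative types that reproduces the policy and shows its effect:

```cpp
#include <cstdio>
#include <vector>

// Toy stand-in for node_info: src1 identifies the shared input, stackable
// marks quantized matmuls. Not the real types from this patch.
struct toy_node { int src1; bool stackable; };

static std::vector<int> toy_reorder(const std::vector<toy_node> & nodes) {
    const int n = (int) nodes.size();
    std::vector<int>  order;
    std::vector<bool> used(n, false);
    constexpr int N_FORWARD = 8;                      // same look-ahead window as above
    for (int i = 0; i < n; i++) {
        if (used[i]) continue;
        order.push_back(i);
        if (!nodes[i].stackable) continue;
        for (int j = i + 1; j < n && j < i + N_FORWARD; j++) {
            if (!used[j] && nodes[j].stackable && nodes[j].src1 == nodes[i].src1) {
                order.push_back(j);                   // stack matmuls sharing src1
                used[j] = true;
            }
        }
    }
    return order;
}

int main() {
    // MM(w0,x), ADD, MM(w1,x)  ->  MM(w0,x), MM(w1,x), ADD
    std::vector<toy_node> g = { {0, true}, {-1, false}, {0, true} };
    for (int i : toy_reorder(g)) std::printf("%d ", i);   // prints: 0 2 1
    std::printf("\n");
}
```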
after the reorder is done, we unfuse + for (int i = 0; i < n; i++) { + node_info node = { + /*.node =*/ gf->nodes[i], + /*.fused =*/ {}, + }; + + // fuse only ops that start with these operations + // can be expanded when needed + if (node.op() == GGML_OP_ADD || + node.op() == GGML_OP_NORM || + node.op() == GGML_OP_RMS_NORM) { + ops[0] = node.op(); + + int f = i + 1; + while (f < n && f < i + MAX_FUSE) { + // conservatively allow fusing only these ops + // can be expanded when needed + if (gf->nodes[f]->op != GGML_OP_ADD && + gf->nodes[f]->op != GGML_OP_MUL && + gf->nodes[f]->op != GGML_OP_NORM && + gf->nodes[f]->op != GGML_OP_RMS_NORM) { + break; + } + ops[f - i] = gf->nodes[f]->op; + f++; + } + + f -= i; + for (; f > 1; f--) { + if (ggml_can_fuse(gf, i, ops, f)) { + break; + } + } + + // add the fused tensors into the node info so we can unfuse them later + for (int k = 1; k < f; k++) { + ++i; + + // the .dst() becomes the last fused tensor + node.add_fused(gf->nodes[i]); + } + } + + nodes.push_back(std::move(node)); + } + + const auto order = ggml_hexagon_graph_optimize_reorder(nodes); + + // unfuse + { + int j = 0; + for (const auto i : order) { + const auto & node = nodes[i]; + + gf->nodes[j++] = node.node; + + for (auto * fused : node.fused) { + gf->nodes[j++] = fused; + } + } + } +} + +static struct ggml_backend_i hexagon_backend_i = { + /* .get_name = */ ggml_backend_hexagon_name, + /* .free = */ ggml_backend_hexagon_free, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_async = */ NULL, + /* .synchronize = */ ggml_backend_hexagon_synchronize, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_update = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_hexagon_graph_compute, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, + /* .graph_optimize = */ ggml_backend_hexagon_graph_optimize, +}; + +static ggml_guid_t ggml_backend_hexagon_guid() { + static ggml_guid guid = { 0x7b, 0x57, 0xdc, 0xaf, 0xde, 0x12, 0x1d, 0x49, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 }; + return &guid; +} + +bool ggml_backend_is_hexagon(ggml_backend_t backend) { + return backend && backend->iface.get_name == ggml_backend_hexagon_name; +} + +// device interface + +static ggml_backend_t ggml_backend_hexagon_device_init(ggml_backend_dev_t dev, const char * params) { + auto sess = static_cast(dev->context); + + return new ggml_backend{ + /* .guid = */ ggml_backend_hexagon_guid(), + /* .interface = */ hexagon_backend_i, + /* .device = */ dev, + /* .context = */ sess, + }; + + GGML_UNUSED(params); +} + +static const char * ggml_backend_hexagon_device_get_name(ggml_backend_dev_t dev) { + auto sess = static_cast(dev->context); + return sess->name.c_str(); + + GGML_UNUSED(dev); +} + +static const char * ggml_backend_hexagon_device_get_description(ggml_backend_dev_t dev) { + return "Hexagon"; + GGML_UNUSED(dev); +} + +static void ggml_backend_hexagon_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { + // ~2GB per session for now + *free = 2ULL * 1024 * 1024 * 1024; + *total = *free; + + GGML_UNUSED(dev); +} + +static enum ggml_backend_dev_type ggml_backend_hexagon_device_get_type(ggml_backend_dev_t dev) { + return GGML_BACKEND_DEVICE_TYPE_GPU; + + GGML_UNUSED(dev); +} + +static void ggml_backend_hexagon_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + props->name = ggml_backend_hexagon_device_get_name(dev); + props->description = 
ggml_backend_hexagon_device_get_description(dev); + props->type = ggml_backend_hexagon_device_get_type(dev); + ggml_backend_hexagon_device_get_memory(dev, &props->memory_free, &props->memory_total); + props->caps = { + /* .async = */ true, + /* .host_buffer = */ (bool) opt_hostbuf, + /* .buffer_from_host_ptr = */ false, + /* .events = */ false, + }; +} + +static ggml_backend_buffer_type_t ggml_backend_hexagon_device_get_buffer_type(ggml_backend_dev_t dev) { + auto sess = static_cast(dev->context); + return &sess->buffer_type; +} + +static ggml_backend_buffer_type_t ggml_backend_hexagon_device_get_repack_buffer_type(ggml_backend_dev_t dev) { + auto sess = static_cast(dev->context); + return &sess->repack_buffer_type; +} + +static bool ggml_backend_hexagon_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { + auto sess = static_cast(dev->context); + + bool supp = false; + + switch (op->op) { + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + supp = true; + break; + + case GGML_OP_MUL_MAT: + supp = ggml_hexagon_supported_mul_mat(sess, op); + break; + + case GGML_OP_MUL_MAT_ID: + supp = ggml_hexagon_supported_mul_mat_id(sess, op); + break; + + case GGML_OP_MUL: + case GGML_OP_ADD: + case GGML_OP_SUB: + supp = ggml_hexagon_supported_binary(sess, op); + break; + + case GGML_OP_ADD_ID: + supp = ggml_hexagon_supported_add_id(sess, op); + break; + + case GGML_OP_RMS_NORM: + supp = ggml_hexagon_supported_unary(sess, op); + break; + + case GGML_OP_SOFT_MAX: + supp = ggml_hexagon_supported_softmax(sess, op); + break; + + case GGML_OP_UNARY: + if (ggml_get_unary_op(op) == GGML_UNARY_OP_SILU) { + supp = ggml_hexagon_supported_activations(sess, op); + } + break; + + case GGML_OP_GLU: + if ((ggml_get_glu_op(op) == GGML_GLU_OP_SWIGLU) /* || (ggml_get_glu_op(op) == GGML_GLU_OP_SWIGLU_OAI) */) { + supp = ggml_hexagon_supported_activations(sess, op); + } + break; + + case GGML_OP_ROPE: + supp = ggml_hexagon_supported_rope(sess, op); + break; + + default: + break; + } + + if (opt_verbose) { + char dims[64 * GGML_MAX_SRC]; + char strides[64 * GGML_MAX_SRC]; + char types[16 * GGML_MAX_SRC]; + char buffs[64 * GGML_MAX_SRC]; + char names[64 * GGML_MAX_SRC]; + + hex_format_op_dims(dims, op); + hex_format_op_strides(strides, op); + hex_format_op_types(types, op); + hex_format_op_buffs(buffs, op); + hex_format_op_names(names, op); + + HEX_VERBOSE("ggml-hex: %s device-supports-op %s : %s : %s : %s : %s : %s : (%d)\n", sess->name.c_str(), + ggml_op_name(op->op), names, dims, types, strides, buffs, (int) supp); + } + + return supp; + + GGML_UNUSED(dev); +} + +static bool ggml_backend_hexagon_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { + if (buft->iface.get_alignment != ggml_backend_hexagon_buffer_type_get_alignment) { + return false; + } + + auto s0 = static_cast(dev->context); + auto s1 = static_cast(buft->context)->sess; + + // Need session/domain-id for buffers to be compatible + bool supp = (s0->session_id == s1->session_id); + + HEX_VERBOSE("ggml-hex: %s device-supports-buft %s (%d)\n", s0->name.c_str(), s1->name.c_str(), (int) supp); + + return supp; +} + +static ggml_backend_buffer_type_t * ggml_backend_hexagon_device_get_extra_buffers_type(ggml_backend_dev_t dev) { + auto s0 = static_cast(dev->context); + HEX_VERBOSE("ggml-hex: device-get-extra-buft : %s \n", s0->name.c_str()); + + static ggml_backend_buffer_type_t bufts[2]; + bufts[0] = 
ggml_backend_hexagon_device_get_repack_buffer_type(dev); + bufts[1] = NULL; + return bufts; +} + +static const struct ggml_backend_device_i ggml_backend_hexagon_device_i = { + /* .get_name = */ ggml_backend_hexagon_device_get_name, + /* .get_description = */ ggml_backend_hexagon_device_get_description, + /* .get_memory = */ ggml_backend_hexagon_device_get_memory, + /* .get_type = */ ggml_backend_hexagon_device_get_type, + /* .get_props = */ ggml_backend_hexagon_device_get_props, + /* .init_backend = */ ggml_backend_hexagon_device_init, + /* .get_buffer_type = */ ggml_backend_hexagon_device_get_buffer_type, + /* .get_host_buffer_type = */ NULL, // ggml_backend_hexagon_device_get_host_buffer_type, + /* .buffer_from_host_ptr = */ NULL, // ggml_backend_hexagon_device_buffer_from_ptr, + /* .supports_op = */ ggml_backend_hexagon_device_supports_op, + /* .supports_buft = */ ggml_backend_hexagon_device_supports_buft, + /* .offload_op = */ NULL, // ggml_backend_hexagon_device_offload_op, + /* .event_new = */ NULL, + /* .event_free = */ NULL, + /* .event_synchronize = */ NULL, +}; + +//** backend registry + +#define GGML_HEXAGON_MAX_SESSIONS 16 + +struct ggml_hexagon_registry { + ggml_hexagon_registry(ggml_backend_reg_t reg); + ~ggml_hexagon_registry(); + + ggml_backend_device devices[GGML_HEXAGON_MAX_SESSIONS]; +}; + +ggml_hexagon_registry::ggml_hexagon_registry(ggml_backend_reg_t reg) { + GGML_LOG_INFO("ggml-hex: Hexagon backend (experimental) : allocating new registry : ndev %zu\n", opt_ndev); + + if (!opt_arch) { + int err = get_hex_arch_ver(CDSP_DOMAIN_ID, &opt_arch); + if (err != 0) { + GGML_LOG_ERROR("ggml-hex: failed to query HTP version (err %d) defaulting to v73\n", err); + opt_arch = 73; + } + } + + GGML_LOG_INFO("ggml-hex: Hexagon Arch version v%d\n", opt_arch); + + // Create devices / sessions + for (size_t i = 0; i < opt_ndev; i++) { + devices[i].iface = ggml_backend_hexagon_device_i; + devices[i].reg = reg; + try { + devices[i].context = new ggml_hexagon_session(i); + } catch (std::exception const &exc) { + GGML_LOG_ERROR("ggml-hex: failed to create device/session %zu\n", i); + devices[i].context = nullptr; + } + } +} + +ggml_hexagon_registry::~ggml_hexagon_registry() { + GGML_LOG_INFO("ggml-hex: releasing registry\n"); + + // Release devices / sessions + for (size_t i = 0; i < opt_ndev; i++) { + auto sess = static_cast(devices[i].context); + delete sess; + } +} + +static const char * ggml_backend_hexagon_reg_get_name(ggml_backend_reg_t reg) { + return "HTP"; + GGML_UNUSED(reg); +} + +static size_t ggml_backend_hexagon_reg_get_device_count(ggml_backend_reg_t reg) { + return opt_ndev; + GGML_UNUSED(reg); +} + +static ggml_backend_dev_t ggml_backend_hexagon_reg_get_device(ggml_backend_reg_t reg, size_t index) { + auto hreg = static_cast(reg->context); + + if (index >= opt_ndev || !hreg->devices[index].context) { + return nullptr; + } + + return &hreg->devices[index]; +} + +static void * ggml_backend_hexagon_get_proc_address(ggml_backend_reg_t reg, const char * name) { + if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) { + ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_hexagon_device_get_extra_buffers_type; + return (void *) fct; + } + + return NULL; +} + +static void ggml_hexagon_init(ggml_backend_reg * reg) { + // Basic sanity checks to make sure definitions match + static_assert((unsigned int) HTP_TYPE_Q4_0 == (unsigned int) GGML_TYPE_Q4_0, + "please update hexagon_type to match ggml_type"); + static_assert((unsigned int) HTP_TYPE_Q8_0 == (unsigned int) 
GGML_TYPE_Q8_0, + "please update hexagon_type to match ggml_type"); + static_assert((unsigned int) HTP_TYPE_MXFP4 == (unsigned int) GGML_TYPE_MXFP4, + "please update hexagon_type to match ggml_type"); + + const char * str_verbose = getenv("GGML_HEXAGON_VERBOSE"); + const char * str_hostbuf = getenv("GGML_HEXAGON_HOSTBUF"); + + opt_verbose = str_verbose ? atoi(str_verbose) : 0; + opt_profile = getenv("GGML_HEXAGON_PROFILE") != nullptr; + opt_etm = getenv("GGML_HEXAGON_ETM") != nullptr; + opt_experimental = getenv("GGML_HEXAGON_EXPERIMENTAL") != nullptr; + + const char * str_opmask = getenv("GGML_HEXAGON_OPMASK"); + if (str_opmask != nullptr) { + opt_opmask = strtoul(str_opmask, NULL, 0); + } + opt_opsync = getenv("GGML_HEXAGON_OPSYNC") != nullptr; + + const char * str_ndev = getenv("GGML_HEXAGON_NDEV"); + if (str_ndev) { + opt_ndev = strtoul(str_ndev, NULL, 0); + if (opt_ndev > GGML_HEXAGON_MAX_SESSIONS) { + opt_ndev = GGML_HEXAGON_MAX_SESSIONS; + } + } + + const char * str_nhvx = getenv("GGML_HEXAGON_NHVX"); + if (str_nhvx) { + opt_nhvx = strtoul(str_nhvx, NULL, 0); + } + + const char * str_arch = getenv("GGML_HEXAGON_ARCH"); + if (str_arch) { + if (str_arch[0] == 'v') { + str_arch++; + } + opt_arch = strtoul(str_arch, NULL, 0); + } + + opt_hostbuf = str_hostbuf ? atoi(str_hostbuf) : 1; + + reg->context = new ggml_hexagon_registry(reg); + + HEX_VERBOSE("ggml-hex: size-of-general-req %zu size-of-general-rsp %zu\n", sizeof(struct htp_general_req), + sizeof(struct htp_general_rsp)); +} + +static const struct ggml_backend_reg_i ggml_backend_hexagon_reg_i = { + /* .get_name = */ ggml_backend_hexagon_reg_get_name, + /* .get_device_count = */ ggml_backend_hexagon_reg_get_device_count, + /* .get_device = */ ggml_backend_hexagon_reg_get_device, + /* .get_proc_address = */ ggml_backend_hexagon_get_proc_address, +}; + +ggml_backend_reg_t ggml_backend_hexagon_reg(void) { + static bool initialized = false; + + static ggml_backend_reg reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_hexagon_reg_i, + /* .context = */ NULL }; + + { + static std::mutex mutex; + std::lock_guard lock(mutex); + if (!initialized) { + ggml_hexagon_init(®); + } + + initialized = true; + } + + return ® +} + +GGML_BACKEND_DL_IMPL(ggml_backend_hexagon_reg) diff --git a/ggml/src/ggml-hexagon/htp-utils.c b/ggml/src/ggml-hexagon/htp-utils.c new file mode 100644 index 0000000000000..e8a035af8c660 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp-utils.c @@ -0,0 +1,448 @@ + +#pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +#pragma clang diagnostic ignored "-Wmissing-prototypes" +#pragma clang diagnostic ignored "-Wsign-compare" + +#define GGML_COMMON_IMPL_C +#include "ggml-backend-impl.h" +#include "ggml-common.h" +#include "ggml-hexagon.h" +#include "ggml-impl.h" + +#include "htp-utils.h" + +#include +#include +#include +#include +#include +#include +#include + +domain * get_domain(int domain_id) { + int i = 0; + int size = sizeof(supported_domains) / sizeof(domain); + + for (i = 0; i < size; i++) { + if (supported_domains[i].id == domain_id) { + return &supported_domains[i]; + } + } + + return NULL; +} + +bool is_valid_domain_id(int domain_id, int compute_only) { + int i = 0; + int size = sizeof(supported_domains) / sizeof(domain); + + if (compute_only) { + return is_CDSP(domain_id); + } + + for (i = 0; i < size; i++) { + if (supported_domains[i].id == domain_id) { + return true; + } + } + + return false; +} + +int get_domains_info(char * domain_type, int * num_domains, fastrpc_domain ** 
domains_info) { + int nErr = AEE_SUCCESS; + int ss_info = 0; + if (domain_type != NULL) { + if (strcmp(domain_type, "LPASS") == 0) { + ss_info = FASTRPC_LPASS; + } else if (strcmp(domain_type, "HPASS") == 0) { + ss_info = FASTRPC_HPASS; + } else { + ss_info = FASTRPC_NSP; + } + } + system_req_payload req = { 0 }; + req.id = FASTRPC_GET_DOMAINS; + req.sys.domains = NULL; + fastrpc_domain * domain = NULL; + if (ss_info != 0) { + req.sys.flags = DOMAINS_LIST_FLAGS_SET_TYPE(req.sys.flags, ss_info); + } else { + req.sys.flags = 0; + } +#ifdef _WIN32 + nErr = AEE_EUNSUPPORTED; + goto bail; +#endif + if (remote_system_request) { + nErr = remote_system_request(&req); + if (nErr != AEE_SUCCESS) { + GGML_LOG_ERROR("Failure in remote_system_request call: %d.\n", nErr); + goto bail; + } + // Allocate memory for domain-info array + req.sys.max_domains = req.sys.num_domains; + if ((req.sys.domains = calloc(req.sys.num_domains, sizeof(fastrpc_domain))) == NULL) { + nErr = AEE_ENOMEMORY; + GGML_LOG_ERROR("Unable to allocate memory for req.sys.domains"); + goto bail; + } + + nErr = remote_system_request(&req); + if (nErr != AEE_SUCCESS) { + GGML_LOG_ERROR("Failure in remote_system_request call: %d.\n", nErr); + goto bail; + } + + for (int i = 0; i < req.sys.num_domains; i++) { + // Verify that only requested type domains were returned + domain = &req.sys.domains[i]; + if (domain->type != ss_info && domain_type != NULL) { + nErr = -1; + GGML_LOG_ERROR("Incorrect data received from remote_system_request.\n"); + goto bail; + } + } + *domains_info = req.sys.domains; + *num_domains = req.sys.num_domains; + } else { + nErr = AEE_EUNSUPPORTED; + goto bail; + } +bail: + if (nErr && !req.sys.domains) { + free(req.sys.domains); + } + return nErr; +} + +int get_effective_domain_id(char * domain_name, int session_id, int * effec_domain_id) { + int err = 0; + remote_rpc_effective_domain_id_t sess = { 0 }; + + sess.domain_name = domain_name; + sess.domain_name_len = strlen(domain_name); + sess.session_id = session_id; + + err = remote_session_control(FASTRPC_GET_EFFECTIVE_DOMAIN_ID, &sess, sizeof(sess)); + if (err) { + GGML_LOG_ERROR("Error 0x%x: failed to get effective domain id for %s, session id %d\n", err, sess.domain_name, + session_id); + return err; + } + + *effec_domain_id = sess.effective_domain_id; + return err; +} + +int get_dsp_support(int * domain) { + int nErr = AEE_SUCCESS; + *domain = CDSP_DOMAIN_ID; // DSP domain default value is CDSP_DOMAIN_ID + + if (remote_handle_control) { + struct remote_dsp_capability dsp_capability_domain = { CDSP_DOMAIN_ID, DOMAIN_SUPPORT, 0 }; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + goto bail; + } + + if (dsp_capability_domain.capability == 0) { + dsp_capability_domain.domain = ADSP_DOMAIN_ID; // Check for ADSP support. 
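+            // no cDSP support was reported, so the same DOMAIN_SUPPORT query is re-issued against the ADSP
+            // (the Agatti note below covers that case).
+            // Hypothetical caller sketch (not part of this file): host code would typically do
+            //     int domain = -1;
+            //     if (get_dsp_support(&domain) == AEE_SUCCESS) { /* open the FastRPC session on `domain` */ }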
+ dsp_capability_domain.attribute_ID = DOMAIN_SUPPORT; + dsp_capability_domain.capability = 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, + sizeof(struct remote_dsp_capability)); + if (dsp_capability_domain.capability) { + *domain = ADSP_DOMAIN_ID; // For targets like Agatti (not having cDSP), domain is ADSP_DOMAIN_ID + } + } + + if (nErr != AEE_SUCCESS) { + GGML_LOG_ERROR("\nget_dsp_support failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: + return nErr; +} + +int get_vtcm_info(int domain, uint32_t * capability, uint32_t attr) { + int nErr = AEE_SUCCESS; + *capability = 0; + + if (attr == VTCM_PAGE || attr == VTCM_COUNT) { + } else { + nErr = AEE_EBADPARM; + GGML_LOG_ERROR("Unsupported attr. Only VTCM_PAGE and VTCM_COUNT supported\n"); + goto bail; + } + if (remote_handle_control) { + if (domain == ADSP_DOMAIN_ID || domain == CDSP_DOMAIN_ID) { + /* + * Query the DSP for VTCM information + * Since the ADSP does not have a dedicated VTCM, we expect the output to be 0 + */ + struct remote_dsp_capability dsp_capability_vtcm_dsp; + dsp_capability_vtcm_dsp.domain = (uint32_t) domain; + dsp_capability_vtcm_dsp.attribute_ID = attr; + dsp_capability_vtcm_dsp.capability = (uint32_t) 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_vtcm_dsp, + sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + GGML_LOG_ERROR("Running the usecase without checking the capability\n"); + nErr = AEE_SUCCESS; + goto bail; + } else if (nErr == AEE_SUCCESS) { + *capability = dsp_capability_vtcm_dsp.capability; + } else { + GGML_LOG_ERROR("\nget_vtcm_info failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTED; + GGML_LOG_ERROR("Unsupported domain %d\n", domain); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: + return nErr; +} + +bool is_unsignedpd_supported(int domain_id) { + int nErr = AEE_SUCCESS; + if (remote_handle_control) { + struct remote_dsp_capability dsp_capability_domain = { domain_id, UNSIGNED_PD_SUPPORT, 0 }; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_domain, sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device. Falling back to signed pd.\n"); + return false; + } + if (nErr) { + GGML_LOG_ERROR("\nERROR 0x%x: FastRPC Capability API failed. Falling back to signed pd.", nErr); + return false; + } + if (dsp_capability_domain.capability == 1) { + return true; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device. 
Falling back to signed pd.\n"); + return false; + } + return false; +} + +bool get_unsignedpd_support(void) { + return is_unsignedpd_supported(CDSP_DOMAIN_ID); +} + +bool is_async_fastrpc_supported(int domain) { + int nErr = AEE_SUCCESS; + if (remote_handle_control) { + if (domain == CDSP_DOMAIN_ID) { + /* + * Query the DSP for ASYNC_FASTRPC_SUPPORT information + * Async fastrpc is supported only on CDSP + */ + struct remote_dsp_capability dsp_capability_async_support; + dsp_capability_async_support.domain = (uint32_t) domain; + dsp_capability_async_support.attribute_ID = ASYNC_FASTRPC_SUPPORT; + dsp_capability_async_support.capability = (uint32_t) 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_async_support, + sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + GGML_LOG_ERROR("Running the usecase without checking the capability\n"); + nErr = AEE_SUCCESS; + goto bail; + } else if (dsp_capability_async_support.capability == 1) { + return true; + } + if (nErr != AEE_SUCCESS) { + GGML_LOG_ERROR("\nis_async_fastrpc_supported failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTED; + GGML_LOG_ERROR("Async fastrpc is not supported on domain %d\n", domain); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: + return false; +} + +bool is_status_notification_supported(int domain) { + int nErr = AEE_SUCCESS; + + if (remote_handle_control) { + /* + * Query the DSP for STATUS_NOTIFICATION_SUPPORT information + * DSP User PD status notification Support + */ + struct remote_dsp_capability dsp_capability_status_notification_support; + dsp_capability_status_notification_support.domain = (uint32_t) domain; + dsp_capability_status_notification_support.attribute_ID = STATUS_NOTIFICATION_SUPPORT; + dsp_capability_status_notification_support.capability = (uint32_t) 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_status_notification_support, + sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + GGML_LOG_ERROR("Running the usecase without checking the capability\n"); + nErr = AEE_SUCCESS; + goto bail; + } else if (dsp_capability_status_notification_support.capability == 1) { + return true; + } + if (nErr != AEE_SUCCESS) { + GGML_LOG_ERROR("\nis_status_notification_supported failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: + return false; +} + +int get_hmx_support_info(int domain, uint32_t * capability, uint32_t attr) { + int nErr = AEE_SUCCESS; + *capability = 0; + + if (attr != HMX_SUPPORT_SPATIAL && attr != HMX_SUPPORT_DEPTH) { + nErr = AEE_EBADPARM; + GGML_LOG_ERROR("Unsupported attr. 
Only HMX_SUPPORT_SPATIAL and HMX_SUPPORT_DEPTH supported\n"); + goto bail; + } + if (remote_handle_control) { + if (domain == CDSP_DOMAIN_ID) { + /* + * Query the DSP for HMX SUPPORT information + * HMX is supported on CDSP only + */ + struct remote_dsp_capability dsp_capability_hmx_dsp; + dsp_capability_hmx_dsp.domain = (uint32_t) domain; + dsp_capability_hmx_dsp.attribute_ID = attr; + dsp_capability_hmx_dsp.capability = (uint32_t) 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_hmx_dsp, + sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + GGML_LOG_ERROR("Running the usecase without checking the capability\n"); + nErr = AEE_SUCCESS; + goto bail; + } else if (nErr == AEE_SUCCESS) { + *capability = dsp_capability_hmx_dsp.capability; + } else { + GGML_LOG_ERROR("\nget_hmx_support_info failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTED; + GGML_LOG_ERROR("HMX support is not there for domain %d\n", domain); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: + return nErr; +} + +int get_hex_arch_ver(int domain, int * arch) { + if (!remote_handle_control) { + GGML_LOG_ERROR("ggml-hex: remote_handle_control is not supported on this device\n"); + return AEE_EUNSUPPORTEDAPI; + } + + struct remote_dsp_capability arch_ver; + arch_ver.domain = (uint32_t) domain; + arch_ver.attribute_ID = ARCH_VER; + arch_ver.capability = (uint32_t) 0; + + int err = remote_handle_control(DSPRPC_GET_DSP_INFO, &arch_ver, sizeof(arch_ver)); + if ((err & 0xff) == (AEE_EUNSUPPORTEDAPI & 0xff)) { + GGML_LOG_ERROR("ggml-hex: FastRPC capability API is not supported on this device\n"); + return AEE_EUNSUPPORTEDAPI; + } + + if (err != AEE_SUCCESS) { + GGML_LOG_ERROR("ggml-hex: FastRPC capability query failed (err %d)\n", err); + return err; + } + + switch (arch_ver.capability & 0xff) { + case 0x73: + *arch = 73; + return 0; + case 0x75: + *arch = 75; + return 0; + case 0x79: + *arch = 79; + return 0; + case 0x81: + *arch = 81; + return 0; + } + return -1; +} + +int get_hvx_support_info(int domain, uint32_t * capability, uint32_t attr) { + int nErr = AEE_SUCCESS; + *capability = 0; + + if (remote_handle_control) { + if (domain == CDSP_DOMAIN_ID) { + /* + * Query the DSP for HVX SUPPORT information + * HVX is supported on CDSP only + */ + struct remote_dsp_capability dsp_capability_hvx_dsp; + dsp_capability_hvx_dsp.domain = (uint32_t) domain; + dsp_capability_hvx_dsp.attribute_ID = attr; + dsp_capability_hvx_dsp.capability = (uint32_t) 0; + nErr = remote_handle_control(DSPRPC_GET_DSP_INFO, &dsp_capability_hvx_dsp, + sizeof(struct remote_dsp_capability)); + if ((nErr & 0xFF) == (AEE_EUNSUPPORTEDAPI & 0xFF)) { + GGML_LOG_ERROR("\nFastRPC Capability API is not supported on this device\n"); + GGML_LOG_ERROR("Running the usecase without checking the capability\n"); + nErr = AEE_SUCCESS; + goto bail; + } else if (nErr == AEE_SUCCESS) { + *capability = dsp_capability_hvx_dsp.capability; + } else { + GGML_LOG_ERROR("\nget_hvx_support_info failed with Error 0x%x\n", nErr); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTED; + GGML_LOG_ERROR("HVX support is not available on domain %d\n", domain); + goto bail; + } + } else { + nErr = AEE_EUNSUPPORTEDAPI; + GGML_LOG_ERROR("remote_dsp_capability interface is not supported on this device\n"); + } + +bail: 
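+    // as with the other capability helpers above, every failure path jumps here and returns the
+    // AEE_* error code; *capability stays at the 0 it was initialized to in that case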
+ return nErr; +} diff --git a/ggml/src/ggml-hexagon/htp-utils.h b/ggml/src/ggml-hexagon/htp-utils.h new file mode 100644 index 0000000000000..66f9fd373e2be --- /dev/null +++ b/ggml/src/ggml-hexagon/htp-utils.h @@ -0,0 +1,219 @@ +#ifndef HTP_UTILS_H +#define HTP_UTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/* Offset to differentiate HLOS and Hexagon error codes. + Stores the value of AEE_EOFFSET for Hexagon. */ +#ifndef DSP_OFFSET +# define DSP_OFFSET 0x80000400 +#endif + +/* Errno for connection reset by peer. */ +#ifndef ECONNRESET +# ifdef __hexagon__ +# define ECONNRESET 104 +# endif +#endif + +/* Abstraction of different OS specific sleep APIs. + SLEEP accepts input in seconds. */ +#ifndef SLEEP +# ifdef __hexagon__ +# define SLEEP(x) \ + { /* Do nothing for simulator. */ \ + } +# else +# ifdef _WINDOWS +# define SLEEP(x) Sleep(1000 * x) /* Sleep accepts input in milliseconds. */ +# else +# define SLEEP(x) sleep(x) /* sleep accepts input in seconds. */ +# endif +# endif +#endif + +/* Include windows specific header files. */ +#ifdef _WINDOWS +# include +# include +# define _CRT_SECURE_NO_WARNINGS 1 +# define _WINSOCK_DEPRECATED_NO_WARNINGS 1 +/* Including this file for custom implementation of getopt function. */ +# include "getopt_custom.h" +#endif + +/* Includes and defines for all HLOS except windows */ +#if !defined(__hexagon__) && !defined(_WINDOWS) +# include "unistd.h" + +# include +#endif + +/* Includes and defines for Hexagon and all HLOS except Windows. */ +#if !defined(_WINDOWS) +/* Weak reference to remote symbol for compilation. */ +# pragma weak remote_session_control +# pragma weak remote_handle_control +# pragma weak remote_handle64_control +# pragma weak fastrpc_mmap +# pragma weak fastrpc_munmap +#endif + +#if !defined(_WINDOWS) +# pragma weak remote_system_request +#endif +/** + * Wrapper for FastRPC Capability API: query DSP support. + * + * @param[out] domain pointer to supported domain. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + */ +int get_dsp_support(int * domain); + +/** + * Wrapper for FastRPC Capability API: query VTCM information. + * + * @param[in] domain value of domain in the queried. + * @param[out] capability capability value of the attribute queried. + * @param[in] attr value of the attribute to the queried. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + */ +int get_vtcm_info(int domain, uint32_t * capability, uint32_t attr); + +/** + * Wrapper for FastRPC Capability API: query unsigned pd support on CDSP domain. + * + * @return true if unsigned pd is supported. + * false if unsigned pd is not supported, capability query failed. + */ + +bool get_unsignedpd_support(void); + +/** + * Wrapper for FastRPC Capability API: query unsigned pd support. + * + * @param[in] domain value of domain in the queried. + * @return true if unsigned pd is supported. + * false if unsigned pd is not supported, capability query failed. + */ + +bool is_unsignedpd_supported(int domain_id); + +/** + * is_valid_domain_id API: query a domain id is valid. + * + * @param[in] domain value of domain in the queried. + * @param[in] compute_only value of domain is only compared with CDSP domains supported by the target when enabled. + * @return true if value of domain is valid. + * false if value of domain is not valid. 
+ */ + +bool is_valid_domain_id(int domain_id, int compute_only); + +/** + * get_domain API: get domain struct from domain value. + * + * @param[in] domain value of a domain + * @return Returns domain struct of the domain if it is supported or else + * returns NULL. + * + */ + +domain * get_domain(int domain_id); + +/** + * get_domains_info API: get information for all the domains available on the device + * + * @param[in] domain_type pointer to domain type + * @param[in] num_domains pointer to number of domains + * @param[in] domains_info pointer to save discovered domains information. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + * + * It is user's responsibility to free the memory used to store the domains info whose address is present in domains_info before closing the application. + * + */ + +int get_domains_info(char * domain_type, int * num_domains, fastrpc_domain ** domains_info); + +/** + * get_effective_domain_id API: get effective domain id for given session id + * + * @param[in] domain_name pointer to domain name + * @param[in] session_id + * @param[in] effec_domain_id pointer to save obtained effective domain id. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + * + */ + +int get_effective_domain_id(char * domain_name, int session_id, int * effec_domain_id); + +/** + * is_async_fastrpc_supported API: query a domain id has async fastrpc supported or not + * + * @param[in] domain_id value of a domain + * @return Returns true or false stating support of Async FastRPC + * + */ + +bool is_async_fastrpc_supported(int domain_id); + +/** + * is_status_notification_supported API: query the DSP for STATUS_NOTIFICATION_SUPPORT information + * + * @param[in] domain_id value of a domain + * @return Returns true or false stating status notification support information + * + */ +bool is_status_notification_supported(int domain_id); + +/** + * get_hmx_support_info API: query the DSP for HMX SUPPORT information + * + * @param[in] domain_id value of a domain + * @param[out] capability capability value of the attribute queried. + * @param[in] attr value of the attribute to the queried. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + * + */ +int get_hmx_support_info(int domain, uint32_t * capability, uint32_t attr); + +/** + * get_hex_arch_ver API: query the Hexagon processor architecture version information + * + * @param[in] domain_id value of a domain + * @param[out] Arch version (73, 75, ...) + * @return 0 if query is successful. + * non-zero if error, return value points to the error. + * + */ +int get_hex_arch_ver(int domain, int * arch); + +/** + * get_hvx_support_info API: query the DSP for HVX SUPPORT information + * + * @param[in] domain_id value of a domain + * @param[out] capability capability value of the attribute queried. + * @param[in] attr value of the attribute to the queried. + * @return 0 if query is successful. + * non-zero if error, return value points to the error. 
+ * + */ +int get_hvx_support_info(int domain, uint32_t * capability, uint32_t attr); + +#ifdef __cplusplus +} +#endif + +#endif //DSP_CAPABILITIES_UTILS_H diff --git a/ggml/src/ggml-hexagon/htp/CMakeLists.txt b/ggml/src/ggml-hexagon/htp/CMakeLists.txt new file mode 100644 index 0000000000000..22e3fea11d85f --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/CMakeLists.txt @@ -0,0 +1,40 @@ +cmake_minimum_required(VERSION 3.22.2) +project(ggml-htp C CXX ASM) + +include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_fun.cmake) + +include_directories( + ${HEXAGON_SDK_ROOT}/incs + ${HEXAGON_SDK_ROOT}/incs/stddef + ${CMAKE_CURRENT_SOURCE_DIR}/../.. + ${CMAKE_CURRENT_SOURCE_DIR}/.. + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR}) + +set(HTP_LIB ggml-htp-${DSP_VERSION}) + +add_library(${HTP_LIB} SHARED + main.c + htp_iface_skel.c + worker-pool.c + htp-dma.c + hvx-sigmoid.c + hvx-inverse.c + hvx-exp.c + hvx-utils.c + matmul-ops.c + binary-ops.c + unary-ops.c + softmax-ops.c + act-ops.c + rope-ops.c +) + +target_compile_definitions(${HTP_LIB} PRIVATE + $,HTP_DEBUG=1,NDEBUG=1>) + +build_idl(htp_iface.idl ${HTP_LIB}) + +set_target_properties(${HTP_LIB} PROPERTIES EXPORT_COMPILE_COMMANDS ON) + +install(TARGETS ${HTP_LIB}) diff --git a/ggml/src/ggml-hexagon/htp/act-ops.c b/ggml/src/ggml-hexagon/htp/act-ops.c new file mode 100644 index 0000000000000..16044975d9253 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/act-ops.c @@ -0,0 +1,448 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +#define htp_act_preamble3 \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t ne10 = src1->ne[0]; \ + const uint32_t ne11 = src1->ne[1]; \ + const uint32_t ne12 = src1->ne[2]; \ + const uint32_t ne13 = src1->ne[3]; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t nb10 = src1->nb[0]; \ + const uint32_t nb11 = src1->nb[1]; \ + const uint32_t nb12 = src1->nb[2]; \ + const uint32_t nb13 = src1->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +#define htp_act_preamble2 \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = 
dst->nb[3]; + +static void glu_swiglu_fp32_per_thread(const struct htp_tensor * src0, + const struct htp_tensor * src1, + struct htp_tensor * dst, + const int32_t * op_params, + struct htp_spad * src0_spad, + struct htp_spad * src1_spad, + struct htp_spad * dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread) { + htp_act_preamble3; + + size_t src0_row_size = nb01; + size_t src1_row_size = nb11; + size_t dst_row_size = nb1; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + int is_aligned = 1; + int opt_path = 0; + if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { + is_aligned = 0; + FARF(HIGH, "swiglu-f32: unaligned addresses in elementwise op, possibly slower execution\n"); + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; + const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; + uint8_t * restrict data_dst = (uint8_t *) dst->data; + + bool src1_valid = src1->ne[0]; + if (!src1_valid) { + data_src1 = data_src0; + src1_row_size = src0_row_size; + } + + uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); + uint8_t * restrict src1_spad_data = src1_spad->data + (ith * src1_row_size); + uint8_t * restrict dst_spad_data = dst_spad->data + (ith * dst_row_size); + + const int32_t swapped = op_params[1]; + + const int nc = (src1_valid) ? ne0 : ne0 / 2; + + for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { + const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); + const float * restrict src1 = (float *) (data_src1 + (ir * src1_row_size)); + float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); + + if (ir + 1 < src0_end_row) { + htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); + } + + if (!src1_valid) { + src0 += swapped ? nc : 0; + src1 += swapped ? 
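+            // single-tensor layout: the two GLU inputs live in the two halves of each src0 row,
+            // each nc = ne0 / 2 floats wide; `swapped` selects which half feeds the SiLU path
+            // and which acts as the gate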
0 : nc; + } + + if (1 == opt_path) { + hvx_fast_sigmoid_f32((const uint8_t *) src0, (uint8_t *) src0_spad_data, nc); + hvx_mul_mul_f32_opt((const uint8_t *) src0, (const uint8_t *) src0_spad_data, (const uint8_t *) src1, + (uint8_t *) dst, nc); + } else { + hvx_exp_f32((const uint8_t *) src0, src0_spad_data, nc, true); + hvx_add_scalar_f32(src0_spad_data, 1.0, src1_spad_data, nc); + hvx_inverse_f32(src1_spad_data, src0_spad_data, nc); + + hvx_mul_f32((const uint8_t *) src0, src0_spad_data, dst_spad_data, nc); + hvx_mul_f32(dst_spad_data, (const uint8_t *) src1, (uint8_t *) dst, nc); + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "swiglu-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, + ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void glu_swiglu_oai_fp32_per_thread(const struct htp_tensor * src0, + const struct htp_tensor * src1, + struct htp_tensor * dst, + const int32_t * op_params, + struct htp_spad * src0_spad, + struct htp_spad * src1_spad, + struct htp_spad * dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread) { + htp_act_preamble3; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const size_t src0_row_size = nb01; + const size_t src1_row_size = nb11; + const size_t dst_row_size = nb1; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { + FARF(HIGH, "act-f32: unaligned addresses in activations op, possibly slower execution\n"); + } + + const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; + const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; + uint8_t * restrict data_dst = (uint8_t *) dst->data; + + bool src1_valid = src1->ne[0]; + if (!src1_valid) { + data_src1 = data_src0; + } + + uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); + uint8_t * restrict src1_spad_data = src1_spad->data + (ith * src1_row_size); + uint8_t * restrict dst_spad_data = dst_spad->data + (ith * dst_row_size); + + const int32_t swapped = op_params[1]; + const float alpha = ((const float *) (op_params))[2]; + const float limit = ((const float *) (op_params))[3]; + + const int nc = (src1_valid) ? ne0 : ne0 / 2; + + for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { + const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); + const float * restrict src1 = (float *) (data_src1 + (ir * src1_row_size)); + float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); + + if (ir + 1 < src0_end_row) { + htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); + } + + if (!src1) { + src0 += swapped ? nc : 0; + src1 += swapped ? 
0 : nc; + } + + // x (src0_spad_data) = std::min(src0_p[k], limit); + hvx_min_scalar_f32((const uint8_t *) src0, limit, src0_spad_data, nc); + // y1 (src1_spad_data) = std::clamp(src1_p[k], -limit, limit); + hvx_clamp_scalar_f32((const uint8_t *) src1, limit, limit, src1_spad_data, nc); + // y (src1_spad_data) = y1 + 1.f + hvx_add_scalar_f32(src1_spad_data, 1.0, src1_spad_data, nc); + // x1 (dst_spad_data) = alpha * (x) + hvx_mul_scalar_f32(src0_spad_data, alpha, dst_spad_data, nc); + // x2 (dst_spad_data) = expf(-x1) + hvx_exp_f32(dst_spad_data, dst_spad_data, nc, true); + // x3 (dst_spad_data) = x2 + 1.f + hvx_add_scalar_f32(dst_spad_data, 1.0, dst_spad_data, nc); + // x4 (dst_spad_data) = 1 / x3 + hvx_inverse_f32(dst_spad_data, dst_spad_data, nc); + // out_glu(dst_spad_data) = x * x4 + hvx_mul_f32(src0_spad_data, dst_spad_data, dst_spad_data, nc); + // out = out_glu * (y + 1.f); + hvx_mul_f32(dst_spad_data, src1_spad_data, (uint8_t *) dst, nc); + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "swiglu-f32 %d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, src0->ne[0], + src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void unary_silu_fp32_per_thread(const struct htp_tensor * src0, + struct htp_tensor * dst, + const int32_t * op_params, + struct htp_spad * src0_spad, + struct htp_spad * dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread) { + htp_act_preamble2; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const size_t src0_row_size = nb01; + const size_t dst_row_size = nb1; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + int is_aligned = 1; + int opt_path = 0; + if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { + is_aligned = 0; + FARF(HIGH, "silu-f32: unaligned addresses in elementwise op, possibly slower execution\n"); + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; + uint8_t * restrict data_dst = (uint8_t *) dst->data; + + uint8_t * restrict src0_spad_data = src0_spad->data + (ith * src0_row_size); + uint8_t * restrict dst_spad_data = dst_spad->data + (ith * dst_row_size); + + for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { + const float * restrict src0 = (float *) (data_src0 + (ir * src0_row_size)); + float * restrict dst = (float *) (data_dst + (ir * dst_row_size)); + + if (ir + 1 < src0_end_row) { + htp_l2fetch(src0 + src0_row_size, 1, src0_row_size, src0_row_size); + } + + if (1 == opt_path) { + hvx_fast_sigmoid_f32((const uint8_t *) src0, (uint8_t *) src0_spad_data, ne0); + hvx_mul_f32_opt((const uint8_t *) src0, src0_spad_data, (uint8_t *) dst, ne0); + } else { + hvx_exp_f32((const uint8_t *) src0, src0_spad_data, ne0, true); + hvx_add_scalar_f32(src0_spad_data, 1.0, dst_spad_data, ne0); + hvx_inverse_f32(dst_spad_data, src0_spad_data, ne0); + + hvx_mul_f32((const uint8_t *) src0, src0_spad_data, (uint8_t *) dst, ne0); + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "silu-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) -> 
%ux%ux%ux%u usec %u\n", ith, nth, opt_path, ne00, ne01, ne02, + ne03, src0_start_row, src0_end_row, ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void unary_silu_fp32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = (struct htp_ops_context *) data; + unary_silu_fp32_per_thread(&octx->src0, &octx->dst, octx->op_params, &octx->src0_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread); +} + +static void glu_swiglu_fp32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = (struct htp_ops_context *) data; + glu_swiglu_fp32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->op_params, &octx->src0_spad, + &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread); +} + +static void glu_swiglu_oai_fp32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = (struct htp_ops_context *) data; + glu_swiglu_oai_fp32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->op_params, &octx->src0_spad, + &octx->src1_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread); +} + +static int execute_op_activations_fp32(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + if (((src0->ne[0] * SIZEOF_FP32) != src0->nb[1]) || ((dst->ne[0] * SIZEOF_FP32) != dst->nb[1])) { + FARF(ERROR, "Non-contiguous tensors are not supported at this time \n"); + return HTP_STATUS_NO_SUPPORT; + } + + worker_callback_t act_op_func; + const char * op_type = NULL; + + switch (octx->op) { + case HTP_OP_UNARY_SILU: + act_op_func = unary_silu_fp32; + op_type = "silu-f32"; + break; + + case HTP_OP_GLU_SWIGLU: + act_op_func = glu_swiglu_fp32; + op_type = "swiglu-f32"; + break; + + case HTP_OP_GLU_SWIGLU_OAI: + act_op_func = glu_swiglu_oai_fp32; + op_type = "swiglu-oai-f32"; + break; + + default: + FARF(ERROR, "Unsupported activations Op %u\n", octx->op); + return HTP_STATUS_NO_SUPPORT; + } + + const uint32_t n_threads = octx->n_threads; + const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; + + const size_t src0_row_size = src0->nb[1]; + const size_t src1_row_size = src1->ne[0] ? 
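+    // src1 is absent for the plain SILU/SWIGLU case, so its scratchpad is sized from src0's row instead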
src1->nb[1] : src0->nb[1]; + const size_t dst_row_size = dst->nb[1]; + + // VTCM scratchpads for all tensors + // N rows per thread, padded to HVX vector size + octx->dst_spad.size = htp_round_up(dst_row_size, 128) * octx->n_threads; + octx->src0_spad.size = htp_round_up(src0_row_size, 128) * octx->n_threads; + octx->src1_spad.size = htp_round_up(src1_row_size, 128) * octx->n_threads; + + size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; + + if (src1->ne[0]) { + FARF(HIGH, + "%s: %ux%ux%ux%u x %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", + op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, + octx->dst_spad.size); + } else { + FARF(HIGH, "%s: %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); + } + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "act-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, + spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + uint32_t n_jobs = MIN(n_threads, src0_nrows); + + octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + worker_pool_run_func(octx->ctx->worker_pool, act_op_func, octx, n_jobs); + } + + return err; +} + +int op_activations(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + switch (octx->src0.type) { + case HTP_TYPE_F32: + err = execute_op_activations_fp32(octx); + break; + + default: + err = HTP_STATUS_NO_SUPPORT; + break; + } + + return err; +} diff --git a/ggml/src/ggml-hexagon/htp/binary-ops.c b/ggml/src/ggml-hexagon/htp/binary-ops.c new file mode 100644 index 0000000000000..92c0109d28712 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/binary-ops.c @@ -0,0 +1,344 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +typedef void (*hvx_elemwise_f32_func)(const uint8_t * src0, + const uint8_t * src1, + uint8_t * data_dst, + const int num_elems); + +static hvx_elemwise_f32_func func_table_HVX[] = { hvx_mul_f32, hvx_add_f32, hvx_sub_f32 }; +static hvx_elemwise_f32_func func_table_HVX_opt[] = { hvx_mul_f32_opt, hvx_add_f32_opt, hvx_sub_f32_opt }; + +#define htp_binary_preamble \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t ne10 = src1->ne[0]; \ + const uint32_t ne11 = src1->ne[1]; \ + const uint32_t ne12 = src1->ne[2]; \ + const uint32_t ne13 = src1->ne[3]; \ + \ + 
const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t nb10 = src1->nb[0]; \ + const uint32_t nb11 = src1->nb[1]; \ + const uint32_t nb12 = src1->nb[2]; \ + const uint32_t nb13 = src1->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +static void binary_job_f32_per_thread(const struct htp_tensor * src0, + const struct htp_tensor * src1, + struct htp_tensor * dst, + uint8_t * spad_data, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + enum htp_op op) { + htp_binary_preamble; + + const size_t src0_row_size = nb01; + const size_t src1_row_size = nb11; + const size_t dst_row_size = nb1; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + const uint32_t src1_nrows = ne11 * ne12 * ne13; // src1 rows + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + int is_aligned = 1; + int opt_path = 0; + if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || + (0 == htp_is_aligned((void *) dst->data, VLEN))) { + FARF(HIGH, "binary-f32: unaligned addresses in elementwise op, possibly slower execution\n"); + is_aligned = 0; + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + hvx_elemwise_f32_func func_HVX = (1 == opt_path) ? 
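+    // note: indexing the tables directly with `op` relies on HTP_OP_MUL, HTP_OP_ADD and HTP_OP_SUB
+    // mapping to 0, 1 and 2, matching the order of func_table_HVX / func_table_HVX_opt above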
func_table_HVX_opt[op] : func_table_HVX[op]; + + uint8_t * restrict spad_data_th = spad_data + (ith * src0_row_size); + + const uint32_t nr0 = ne00 / ne10; + + const uint8_t * restrict src0_ptr = (const uint8_t *) src0->data + (src0_start_row * src0_row_size); + uint8_t * restrict dst_ptr = (uint8_t *) dst->data + (src0_start_row * dst_row_size); + + const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; + const uint8_t * restrict src1_ptr = NULL; + + for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { + src1_ptr = data_src1 + (ir % src1_nrows) * src1_row_size; + + if (ir + 1 < src0_end_row) { + htp_l2fetch(src0_ptr + ne00, 1, src0_row_size, src0_row_size); + if (src1_row_size == src0_row_size) { + htp_l2fetch(src1_ptr, 1, src1_row_size, src1_row_size); + } + } + + if (nr0 > 1) { + if ((1 == is_aligned) && (nr0 == ne00)) { + hvx_bcast_fp32_a(spad_data_th, *(float *) src1_ptr, nr0); + } else { + for (uint32_t r = 0; r < nr0; r++) { + memcpy(spad_data_th + r * nb11, (const uint8_t *) src1_ptr, nb11); + } + } + func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) spad_data_th, (uint8_t *) dst_ptr, ne00); + } else { + func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) src1_ptr, (uint8_t *) dst_ptr, ne00); + } + + src0_ptr += src0_row_size; + dst_ptr += dst_row_size; + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "binary-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, + ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void binary_add_id_job_f32_per_thread(const struct htp_tensor * src0, + const struct htp_tensor * src1, + const struct htp_tensor * src2, + struct htp_tensor * dst, + uint8_t * spad_data, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + hvx_elemwise_f32_func func_HVX) { + htp_binary_preamble; + + const size_t src0_row_size = nb01; + const size_t src1_row_size = nb11; + const size_t dst_row_size = nb1; + + const uint32_t ne02_ne01 = ne02 * ne01; + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || + (0 == htp_is_aligned((void *) dst->data, VLEN))) { + FARF(HIGH, "add-id-f32: unaligned addresses, possibly slower execution\n"); + } + + const uint8_t * restrict data_src0 = (const uint8_t *) src0->data; + const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; + uint8_t * restrict data_dst = (uint8_t *) dst->data; + + for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { + // src0 indices + const uint32_t i03 = ir / ne02_ne01; + const uint32_t i02 = (ir - i03 * ne02_ne01) / ne01; + const uint32_t i01 = (ir - i03 * ne02_ne01 - i02 * ne01); + + // src1 indices + const int i11 = *(int32_t *) ((char *) src2->data + i01 * src2->nb[0] + i02 * src2->nb[1]); + assert(i11 >= 0 && i11 < ne11); + + float * restrict dst_ptr = (float *) (data_dst + i03 * nb3 + i02 * nb2 + i01 * nb1); + const float * restrict src0_ptr = (const float *) (data_src0 + i03 * nb03 + i02 * nb02 + i01 * nb01); + const float * restrict src1_ptr = (const float *) (data_src1 + 0 + 0 
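+        // i11 (read from the src2 id tensor above) selects which src1 row is added to this src0 row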
+ i11 * nb11); + + if (ir + 1 < src0_end_row) { + htp_l2fetch(src0_ptr + ne00, 1, src0_row_size, src0_row_size); + if (src1_row_size == src0_row_size) { + htp_l2fetch(src1_ptr + ne10, 1, src1_row_size, src1_row_size); + } + } + + const uint32_t nr0 = ne00 / ne10; + if (nr0 > 1) { + for (uint32_t r = 0; r < nr0; r++) { + memcpy(spad_data + r * nb10, (const uint8_t *) src1_ptr, nb10); + } + func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) spad_data, (uint8_t *) dst_ptr, ne00); + } else { + func_HVX((const uint8_t *) src0_ptr, (const uint8_t *) src1_ptr, (uint8_t *) dst_ptr, ne00); + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "add-id-f32 %d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", ith, nth, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], + src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], dst->ne[1], + dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void binary_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = (struct htp_ops_context *) data; + + switch (octx->op) { + case HTP_OP_MUL: + case HTP_OP_ADD: + case HTP_OP_SUB: + binary_job_f32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->src1_spad.data, n, i, + octx->src0_nrows_per_thread, octx->op); + break; + + case HTP_OP_ADD_ID: + binary_add_id_job_f32_per_thread(&octx->src0, &octx->src1, &octx->src2, &octx->dst, octx->src0_spad.data, n, + i, octx->src0_nrows_per_thread, hvx_add_f32); + break; + + default: + FARF(ERROR, "Unknown Binary Op %u", octx->op); + break; + } +} + +static int execute_op_binary_f32(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + worker_callback_t binary_op_func; + const char * op_type = NULL; + + switch (octx->op) { + case HTP_OP_MUL: + binary_op_func = binary_job_dispatcher_f32; + op_type = "mul-f32"; + break; + + case HTP_OP_ADD: + binary_op_func = binary_job_dispatcher_f32; + op_type = "add-f32"; + break; + + case HTP_OP_SUB: + binary_op_func = binary_job_dispatcher_f32; + op_type = "sub-f32"; + break; + + case HTP_OP_ADD_ID: + binary_op_func = binary_job_dispatcher_f32; + op_type = "add-id-f32"; + break; + + default: + FARF(ERROR, "Unsupported binary-Op %u\n", octx->op); + return HTP_STATUS_NO_SUPPORT; + } + + const int n_threads = octx->n_threads; + const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; + + const size_t src0_row_size = src0->nb[1]; + const size_t src1_row_size = src1->nb[1]; + const size_t dst_row_size = dst->nb[1]; + + // VTCM scratchpads for all tensors + octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; + octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; + octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; + + size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; + + FARF(HIGH, + "%s: (%ux%ux%ux%u) * (%ux%ux%ux%u) -> (%ux%ux%ux%u) : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", + op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, + octx->dst_spad.size); + + // Make sure the reserved vtcm size is sufficient + if 
(octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "binary-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, + octx->ctx->vtcm_size, spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + uint32_t n_jobs = MIN(n_threads, src0_nrows); + + octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + + worker_pool_run_func(octx->ctx->worker_pool, binary_op_func, octx, n_jobs); + } + + return err; +} + +int op_binary(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + switch (octx->src0.type) { + case HTP_TYPE_F32: + err = execute_op_binary_f32(octx); + break; + + default: + err = HTP_STATUS_NO_SUPPORT; + break; + } + + return err; +} diff --git a/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake b/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake new file mode 100644 index 0000000000000..7fa236e328f7e --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake @@ -0,0 +1,157 @@ +if (HEXAGON_TOOLCHAIN_INCLUDED) + return() +endif() +set(HEXAGON_TOOLCHAIN_INCLUDED true) + +#Cross Compiling for Hexagon +set(HEXAGON TRUE) +set(CMAKE_SYSTEM_NAME QURT) +set(CMAKE_SYSTEM_PROCESSOR Hexagon) +set(CMAKE_SYSTEM_VERSION "1") #${HEXAGON_PLATFORM_LEVEL}) +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) +set(CUSTOM_RUNELF_PATH "") + +#To fix backward compatibility with EAI addon. +if (NOT HEXAGON_SDK_ROOT) + set(HEXAGON_SDK_ROOT $ENV{HEXAGON_SDK_ROOT}) +endif() + +if (NOT HEXAGON_TOOLS_ROOT) + if (DEFINED ENV{HEXAGON_TOOLS_ROOT}) + set(HEXAGON_TOOLS_ROOT $ENV{HEXAGON_TOOLS_ROOT}) + endif() + if(NOT HEXAGON_TOOLS_ROOT) + set(HEXAGON_TOOLS_ROOT $ENV{DEFAULT_HEXAGON_TOOLS_ROOT}) + endif() +endif() + +file(TO_CMAKE_PATH "${HEXAGON_TOOLS_ROOT}" HEXAGON_TOOLS_ROOT) +file(TO_CMAKE_PATH "${HEXAGON_SDK_ROOT}" HEXAGON_SDK_ROOT) + +#Get the Binary extension of the Hexagon Toolchain +if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(HEXAGON_TOOLCHAIN_SUFFIX .exe) +endif() +message(DEBUG "CMAKE_HOST_SYSTEM_NAME:${CMAKE_HOST_SYSTEM_NAME}") + +include(${HEXAGON_SDK_ROOT}/build/cmake/hexagon_arch.cmake) + +set(HEXAGON_TOOLCHAIN ${HEXAGON_TOOLS_ROOT}) +set(HEXAGON_LIB_DIR "${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib") +set(HEXAGON_ISS_DIR ${HEXAGON_TOOLCHAIN}/Tools/lib/iss) + +set(CMAKE_TRY_COMPILE_PLATFORM_VARIABLES + HEXAGON_SDK_ROOT + HEXAGON_TOOLS_ROOT +) + +#QURT Related includes and linker flags +set(V_ARCH ${HEXAGON_ARCH}) +set(_QURT_INSTALL_DIR "${HEXAGON_SDK_ROOT}/rtos/qurt/ADSP${V_ARCH}MP${V_ARCH_EXTN}") +set(_QURT_INSTALL_DIR "${HEXAGON_SDK_ROOT}/rtos/qurt/compute${V_ARCH}${V_ARCH_EXTN}") + +if( ${TREE} MATCHES PAKMAN ) + set(_QURT_INSTALL_DIR "${QURT_IMAGE_DIR}/compute${V_ARCH}${V_ARCH_EXTN}") +endif() +message(DEBUG "_QURT_INSTALL_DIR:${_QURT_INSTALL_DIR}") +set(RTOS_DIR ${_QURT_INSTALL_DIR}) +set(QCC_DIR "${HEXAGON_QCC_DIR}/${V_ARCH}/G0") +set(TARGET_DIR "${HEXAGON_LIB_DIR}/${V_ARCH}/G0") + +include_directories( + ${_QURT_INSTALL_DIR}/include + ${_QURT_INSTALL_DIR}/include/qurt + ${_QURT_INSTALL_DIR}/include/posix + ) + +set(QURT_START_LINK_LIBS) +set(QURT_START_LINK_LIBS + "${TARGET_DIR}/init.o" + "${RTOS_DIR}/lib/crt1.o" + "${RTOS_DIR}/lib/debugmon.o" + "${RTOS_DIR}/lib/libqurt.a" + 
"${TARGET_DIR}/libc.a" + "${TARGET_DIR}/libqcc.a" + "${TARGET_DIR}/libhexagon.a" + "${RTOS_DIR}/lib/libqurtcfs.a" + "${RTOS_DIR}/lib/libtimer_island.a" + "${RTOS_DIR}/lib/libtimer_main.a" + "${RTOS_DIR}/lib/libposix.a" + ) +STRING(REPLACE ";" " " QURT_START_LINK_LIBS "${QURT_START_LINK_LIBS}") + +set(QURT_END_LINK_LIBS + ${TARGET_DIR}/fini.o + ) + +#Non QURT related includes and linker flags + +set(TARGET_DIR_NOOS "${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib/${HEXAGON_ARCH}") + +if (NOT NO_WRAP_MEM_API) + set(WRAP_MALLOC -Wl,--wrap=malloc) + set(WRAP_CALLOC -Wl,--wrap=calloc) + set(WRAP_FREE -Wl,--wrap=free) + set(WRAP_REALLOC -Wl,--wrap=realloc) + set(WRAP_MEMALIGN -Wl,--wrap=memalign) +endif() + +set(PIC_SHARED_LD_FLAGS + -mcpu=${V_ARCH} -m${V_ARCH} -mhvx=${V_ARCH} + -G0 + -fpic + -Wl,-Bsymbolic + -Wl,-L${TARGET_DIR_NOOS}/G0/pic + -Wl,-L${HEXAGON_TOOLCHAIN}/Tools/target/hexagon/lib/ + -Wl,--no-threads ${WRAP_MALLOC} ${WRAP_CALLOC} ${WRAP_FREE} ${WRAP_REALLOC} ${WRAP_MEMALIGN} + -shared + "-o " + "" + -Wl,--start-group + "" + "" + -Wl,--end-group + -lc + ) +STRING(REPLACE ";" " " PIC_SHARED_LD_FLAGS "${PIC_SHARED_LD_FLAGS}") + +set(HEXAGON_PIC_SHARED_LINK_OPTIONS "${PIC_SHARED_LD_FLAGS}") + +#System include paths +include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/incs) +include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/incs/stddef) +include_directories(SYSTEM ${HEXAGON_SDK_ROOT}/ipc/fastrpc/incs) + +#LLVM toolchain setup +#Compiler paths, options and architecture +set(CMAKE_C_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang${HEXAGON_TOOLCHAIN_SUFFIX}) +set(CMAKE_CXX_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang++${HEXAGON_TOOLCHAIN_SUFFIX}) +set(CMAKE_AR ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-ar${HEXAGON_TOOLCHAIN_SUFFIX}) +set(CMAKE_ASM_COMPILER ${HEXAGON_TOOLCHAIN}/Tools/bin/hexagon-clang++${HEXAGON_TOOLCHAIN_SUFFIX}) +set(HEXAGON_LINKER ${CMAKE_C_COMPILER}) +set(CMAKE_PREFIX_PATH ${HEXAGON_TOOLCHAIN}/Tools/target/hexagon) + +set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-Wl,-soname,") +set(CMAKE_SHARED_LIBRARY_SONAME_CXX_FLAG "-Wl,-soname,") + +#Compiler Options +set(COMMON_FLAGS "-mcpu=hexagon${V_ARCH} -m${V_ARCH} -mhvx=${V_ARCH} -fvectorize -Wall -Werror -fno-zero-initialized-in-bss -G0 -fdata-sections -fpic ${XQF_ARGS}") + +set(CMAKE_CXX_FLAGS_DEBUG "${COMMON_FLAGS} -O0 -D_DEBUG -g") +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} -O3 -g") +set(CMAKE_CXX_FLAGS_RELEASE "${COMMON_FLAGS} -O3") + +set(CMAKE_C_FLAGS_DEBUG "${COMMON_FLAGS} -O0 -D_DEBUG -g") +set(CMAKE_C_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} -O3 -g") +set(CMAKE_C_FLAGS_RELEASE "${COMMON_FLAGS} -O3") + +set(CMAKE_ASM_FLAGS_DEBUG "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}") +set(CMAKE_ASM_FLAGS_RELEASE "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}") +set(CMAKE_ASM_FLAGS_RELWITHDEBINFO "${COMMON_FLAGS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" ) + +#Linker Options +set(CMAKE_C_CREATE_SHARED_LIBRARY "${HEXAGON_LINKER} ${HEXAGON_PIC_SHARED_LINK_OPTIONS}") +set(CMAKE_CXX_CREATE_SHARED_LIBRARY "${HEXAGON_LINKER} ${HEXAGON_PIC_SHARED_LINK_OPTIONS}") diff --git a/ggml/src/ggml-hexagon/htp/htp-ctx.h b/ggml/src/ggml-hexagon/htp/htp-ctx.h new file mode 100644 index 0000000000000..5c3d217f1ccf1 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/htp-ctx.h @@ -0,0 +1,40 @@ +#ifndef HTP_CTX_H +#define HTP_CTX_H + +#include "htp-dma.h" +#include "worker-pool.h" + +#include +#include +#include +#include + +#define HTP_MAX_NTHREADS 10 + +// FIXME: move these into matmul-ops +#define HTP_SPAD_SRC0_NROWS 16 +#define HTP_SPAD_SRC1_NROWS 16 +#define 
HTP_SPAD_DST_NROWS 2
+
+// Main context for htp DSP backend
+struct htp_context {
+    dspqueue_t queue;
+    dma_queue * dma[HTP_MAX_NTHREADS];
+    worker_pool_context_t worker_pool;
+    uint32_t n_threads;
+
+    int thread_id;
+    int thread_prio;
+
+    uint8_t * vtcm_base;
+    size_t vtcm_size;
+    uint32_t vtcm_rctx;
+
+    atomic_bool vtcm_valid;
+    atomic_bool vtcm_inuse;
+    atomic_bool vtcm_needs_release;
+
+    uint32_t opmask;
+};
+
+#endif /* HTP_CTX_H */
diff --git a/ggml/src/ggml-hexagon/htp/htp-dma.c b/ggml/src/ggml-hexagon/htp/htp-dma.c
new file mode 100644
index 0000000000000..10c54b45ee239
--- /dev/null
+++ b/ggml/src/ggml-hexagon/htp/htp-dma.c
@@ -0,0 +1,69 @@
+#include "htp-dma.h"
+
+#include
+#include
+#include
+
+#pragma clang diagnostic ignored "-Wunused-function"
+
+static inline uint32_t pow2_ceil(uint32_t x) {
+    if (x <= 1) {
+        return 1;
+    }
+    int p = 2;
+    x--;
+    while (x >>= 1) {
+        p <<= 1;
+    }
+    return p;
+}
+
+dma_queue * dma_queue_create(size_t capacity) {
+    dma_queue * q = (dma_queue *) memalign(32, sizeof(dma_queue));
+    if (q == NULL) {
+        FARF(ERROR, "%s: failed to allocate DMA queue\n", __FUNCTION__);
+        return NULL;
+    }
+
+    capacity = pow2_ceil(capacity);
+
+    memset(q, 0, sizeof(dma_queue));
+    q->capacity = capacity;
+    q->idx_mask = capacity - 1;
+
+    q->desc = (hexagon_udma_descriptor_type1_t *) memalign(64, capacity * sizeof(hexagon_udma_descriptor_type1_t));
+    memset(q->desc, 0, capacity * sizeof(hexagon_udma_descriptor_type1_t));
+
+    q->dst = (void **) memalign(4, capacity * sizeof(void *));
+    memset(q->dst, 0, capacity * sizeof(void *));
+
+    q->tail = &q->desc[capacity - 1];
+
+    if (!q->desc && !q->dst) {
+        FARF(ERROR, "%s: failed to allocate DMA queue items\n", __FUNCTION__);
+        return NULL;
+    }
+
+    FARF(HIGH, "dma-queue: capacity %u\n", capacity);
+
+    return q;
+}
+
+void dma_queue_delete(dma_queue * q) {
+    if (!q) {
+        return;
+    }
+    free(q->desc);
+    free(q->dst);
+    free(q);
+}
+
+void dma_queue_flush(dma_queue * q) {
+    while (1) {
+        uint32_t s = dmwait() & 0x3;
+        if (s == HEXAGON_UDMA_DM0_STATUS_IDLE) {
+            break;
+        }
+    }
+    q->tail = NULL;
+}
diff --git a/ggml/src/ggml-hexagon/htp/htp-dma.h b/ggml/src/ggml-hexagon/htp/htp-dma.h
new file mode 100644
index 0000000000000..4d0d54ce859da
--- /dev/null
+++ b/ggml/src/ggml-hexagon/htp/htp-dma.h
@@ -0,0 +1,119 @@
+#ifndef HTP_DMA_H
+#define HTP_DMA_H
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+    hexagon_udma_descriptor_type1_t * desc;  // descriptor pointers
+    hexagon_udma_descriptor_type1_t * tail;  // tail pointer
+    void ** dst;                             // dst pointers
+    uint32_t push_idx;
+    uint32_t pop_idx;
+    uint32_t capacity;
+    uint32_t idx_mask;
+} dma_queue;
+
+dma_queue * dma_queue_create(size_t capacity);
+void dma_queue_delete(dma_queue * q);
+void dma_queue_flush(dma_queue * q);
+
+// TODO: technically we don't need these and could use Q6_dmstart/wait/etc instead
+// but those do not seem to always compile properly.
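+//
+// Rough usage sketch for the dma_queue API declared above (illustrative only;
+// the buffer names below are placeholders, not part of this header). The
+// producer queues row transfers with dma_queue_push() and the consumer calls
+// dma_queue_pop(), which spins on dmpoll() until the descriptor completes:
+//
+//   dma_queue * q = dma_queue_create(8);                 // capacity is rounded up to a power of two
+//   dma_queue_push(q, vtcm_dst, ddr_src,
+//                  dst_row_size, src_row_size, nrows);   // returns false when the ring is full
+//   uint8_t * ready = dma_queue_pop(q);                  // waits until this transfer is done
+//   dma_queue_flush(q);                                  // drain all outstanding transfers
+//   dma_queue_delete(q);
+//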
+static inline void dmstart(void * next) {
+    asm volatile(" release(%0):at" : : "r"(next));
+    asm volatile(" dmstart(%0)" : : "r"(next));
+}
+
+static inline void dmlink(void * cur, void * next) {
+    asm volatile(" release(%0):at" : : "r"(next));
+    asm volatile(" dmlink(%0, %1)" : : "r"(cur), "r"(next));
+}
+
+static inline unsigned int dmpoll(void) {
+    unsigned int ret = 0;
+    asm volatile(" %0 = dmpoll" : "=r"(ret) : : "memory");
+    return ret;
+}
+
+static inline unsigned int dmwait(void) {
+    unsigned int ret = 0;
+    asm volatile(" %0 = dmwait" : "=r"(ret) : : "memory");
+    return ret;
+}
+
+static inline bool dma_queue_push(dma_queue * q,
+                                  void * dst,
+                                  const void * src,
+                                  size_t dst_row_size,
+                                  size_t src_row_size,
+                                  size_t nrows) {
+    if (((q->push_idx + 1) & q->idx_mask) == q->pop_idx) {
+        return false;
+    }
+
+    hexagon_udma_descriptor_type1_t * desc = &q->desc[q->push_idx];
+
+    desc->next           = NULL;
+    desc->length         = 0;
+    desc->desctype       = HEXAGON_UDMA_DESC_DESCTYPE_TYPE1;
+    desc->dstbypass      = 1;
+    desc->srcbypass      = 1;
+    desc->order          = 0;
+    desc->dstate         = HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE;
+    desc->src            = (void *) src;
+    desc->dst            = (void *) dst;
+    desc->allocation     = 0;
+    desc->padding        = 0;
+    desc->roiwidth       = src_row_size;
+    desc->roiheight      = nrows;
+    desc->srcstride      = src_row_size;
+    desc->dststride      = dst_row_size;
+    desc->srcwidthoffset = 0;
+    desc->dstwidthoffset = 0;
+
+    q->dst[q->push_idx] = dst;
+
+    dmlink(q->tail, desc);
+    q->tail = desc;
+
+    // FARF(ERROR, "dma-push: i %u len %u dst %p src %p\n", q->push_idx, len, dst, src);
+    q->push_idx = (q->push_idx + 1) & q->idx_mask;
+    return true;
+}
+
+static inline uint8_t * dma_queue_pop(dma_queue * q) {
+    if (q->push_idx == q->pop_idx) {
+        return NULL;
+    }
+
+    hexagon_udma_descriptor_type1_t * desc = &q->desc[q->pop_idx];
+
+    // Wait for desc to complete
+    while (1) {
+        dmpoll();
+        if (desc->dstate == HEXAGON_UDMA_DESC_DSTATE_COMPLETE) {
+            break;
+        }
+        // FARF(ERROR, "dma-pop: waiting for DMA : %u\n", q->pop_idx);
+    }
+
+    uint8_t * dst = (uint8_t *) q->dst[q->pop_idx];
+
+    // FARF(ERROR, "dma-pop: i %u dst %p\n", q->pop_idx, dst);
+    q->pop_idx = (q->pop_idx + 1) & q->idx_mask;
+    return dst;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif /* HTP_DMA_H */
diff --git a/ggml/src/ggml-hexagon/htp/htp-msg.h b/ggml/src/ggml-hexagon/htp/htp-msg.h
new file mode 100644
index 0000000000000..f23d578806867
--- /dev/null
+++ b/ggml/src/ggml-hexagon/htp/htp-msg.h
@@ -0,0 +1,156 @@
+#ifndef HTP_MSG_H
+#define HTP_MSG_H
+
+#include
+
+// ggml-common.h must be included prior to this header
+
+// Mask to enable various stages of the Ops.
+// Used for debugging and profiling.
+enum {
+    HTP_OPMASK_QUEUE    = (1 << 0),  // Enable Queueing (ie calls into the DSP)
+    HTP_OPMASK_QUANTIZE = (1 << 1),  // Enable Quantize
+    HTP_OPMASK_COMPUTE  = (1 << 2),  // Enable Compute
+};
+
+// Op flags
+enum {
+    HTP_OPFLAGS_SKIP_QUANTIZE = (1 << 0),  // Skip dynamic quantization (reuse quantized tensors)
+    HTP_OPFLAGS_SKIP_COMPUTE  = (1 << 1),  // Skip actual computation (used for profiling)
+    HTP_OPFLAGS_EARLY_WAKEUP  = (1 << 2)   // Send early wakeup notification
+};
+
+enum htp_status {
+    HTP_STATUS_OK             = 1,
+    HTP_STATUS_INTERNAL_ERR   = 2,
+    HTP_STATUS_NO_SUPPORT     = 3,
+    HTP_STATUS_INVAL_PARAMS   = 4,
+    HTP_STATUS_VTCM_TOO_SMALL = 5,
+};
+
+// The values must match the ggml_type.
+// Duplicated here because we can't include full ggml.h in the htp build.
+// We have some static_asserts in the cpp code to ensure things are in sync.
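+// For example, the host-side backend can guard the mapping with checks along
+// these lines (an illustrative sketch, not the actual asserts used):
+//
+//   static_assert((int) HTP_TYPE_F32  == (int) GGML_TYPE_F32,  "htp/ggml type mismatch");
+//   static_assert((int) HTP_TYPE_Q4_0 == (int) GGML_TYPE_Q4_0, "htp/ggml type mismatch");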
+enum htp_data_type { + HTP_TYPE_F32 = 0, + HTP_TYPE_F16 = 1, + HTP_TYPE_Q4_0 = 2, + HTP_TYPE_Q8_0 = 8, + HTP_TYPE_MXFP4 = 39, + HTP_TYPE_COUNT +}; + +// These values are manually translated over to HTP +// !!!! DO NOT ALTER THE ORDER OF THE FIRST FOUR ENUMS !!!! +enum htp_op { + HTP_OP_MUL = 0, + HTP_OP_ADD = 1, + HTP_OP_SUB = 2, + HTP_OP_DIV = 3, + HTP_OP_MUL_MAT = 4, + HTP_OP_MUL_MAT_ID = 5, + HTP_OP_RMS_NORM = 6, + HTP_OP_UNARY_SILU = 7, + HTP_OP_GLU_SWIGLU = 8, + HTP_OP_GLU_SWIGLU_OAI = 9, + HTP_OP_SOFTMAX = 10, + HTP_OP_ADD_ID = 11, + HTP_OP_ROPE = 12, + INVALID +}; + +static inline size_t htp_type_block_size(uint32_t t) { + switch (t) { + case HTP_TYPE_F32: + return 1; + case HTP_TYPE_F16: + return 1; + case HTP_TYPE_Q4_0: + return QK4_0; + case HTP_TYPE_Q8_0: + return QK8_0; + case HTP_TYPE_MXFP4: + return QK_MXFP4; + default: + assert(0 && "unsupported HTP data type"); + } + return 0; +} + +static inline size_t htp_type_nbytes(uint32_t t) { + switch (t) { + case HTP_TYPE_F32: + return 4; + case HTP_TYPE_F16: + return 2; + case HTP_TYPE_Q4_0: + return sizeof(block_q4_0); + case HTP_TYPE_Q8_0: + return sizeof(block_q8_0); + case HTP_TYPE_MXFP4: + return sizeof(block_mxfp4); + default: + assert(0 && "unsupported HTP data type"); + } + return 0; +} + +static const char * htp_type_name(uint32_t t) { + switch (t) { + case HTP_TYPE_F32: + return "fp32"; + case HTP_TYPE_F16: + return "fp16"; + case HTP_TYPE_Q4_0: + return "q4_0"; + case HTP_TYPE_Q8_0: + return "q8_0"; + case HTP_TYPE_MXFP4: + return "mxfp4"; + } + return 0; +} + +// Internal types +#define QK_Q4_0x4x2 256 // 4x Q4_0 blocks packed with next 4x Q4_0 blocks (size in bytes 128) +#define QK_Q8_0x4x2 256 // 4x Q8_0 blocks concat with next 4x Q8_0 blocks +#define QK_MXFP4x4x2 256 // 4x MXFP4 blocks concat with next 4x MXFP4 blocks + +#define HTP_MAX_DIMS 4 + +struct htp_tensor { + uint32_t data; // Buffer offset in the messages, and data pointer on the NSP + uint32_t type; // Data type + uint32_t ne[HTP_MAX_DIMS]; // Number of elements + uint32_t nb[HTP_MAX_DIMS]; // Stride in bytes (see ggml.h ggml_tensor) +}; + +#define HTP_MAX_OP_PARAMS 64 + +struct htp_general_req { + uint32_t op; // GGML/HTP Op + int32_t op_params[HTP_MAX_OP_PARAMS / sizeof(int32_t)]; + // Params for the op, e.g. epsilon of RMS norm + uint32_t flags; // Request flags + + struct htp_tensor src0; // Input0 tensor + struct htp_tensor src1; // Input1 tensor + struct htp_tensor src2; // Input2 tensor + struct htp_tensor dst; // Output tensor + + // should be multiple of 64 bytes (cacheline) +}; + +struct htp_general_rsp { + uint32_t op; // GGML/HTP Op + uint32_t status; // HTP_STATUS_... 
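+    // Profiling counters reported back by the DSP for this request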
+ uint32_t prof_usecs; // Number of usec per request + uint32_t prof_cycles; // Number of cycles per request + uint32_t prof_pkts; // Number of instruction packets per request + uint8_t unused[44]; // Pad to 64 bytes +}; + +#define HTP_MAX_MESSAGE_SIZE sizeof(struct htp_general_req) +#define HTP_MAX_PACKET_BUFFERS 4 + +#endif /* HTP_MSG_H */ diff --git a/ggml/src/ggml-hexagon/htp/htp-ops.h b/ggml/src/ggml-hexagon/htp/htp-ops.h new file mode 100644 index 0000000000000..45723196791af --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/htp-ops.h @@ -0,0 +1,53 @@ +#ifndef HTP_OPS_H +#define HTP_OPS_H + +#include "htp-ctx.h" +#include "htp-msg.h" +#include "worker-pool.h" + +#include +#include + +// ggml-common.h must be included prior to this header + +struct htp_spad { + uint8_t * data; + size_t size; + size_t size_per_thread; +}; + +struct htp_ops_context { + struct htp_context * ctx; + + enum htp_op op; + int32_t op_params[HTP_MAX_OP_PARAMS / sizeof(int32_t)]; + + struct htp_tensor src0; + struct htp_tensor src1; + struct htp_tensor src2; + struct htp_tensor dst; + + struct htp_spad src0_spad; + struct htp_spad src1_spad; + struct htp_spad src2_spad; + struct htp_spad dst_spad; + + worker_pool_context_t * wpool; // worker pool + uint32_t n_threads; // num threads + + uint32_t src0_nrows_per_thread; + uint32_t src1_nrows_per_thread; + + uint32_t flags; +}; + +int op_matmul(struct htp_ops_context * octx); +int op_matmul_id(struct htp_ops_context * octx); +int op_binary(struct htp_ops_context * octx); +int op_unary(struct htp_ops_context * octx); +int op_activations(struct htp_ops_context * octx); +int op_softmax(struct htp_ops_context * octx); +int op_add_id(struct htp_ops_context * octx); +int op_rope(struct htp_ops_context * octx); + +#endif /* HTP_OPS_H */ diff --git a/ggml/src/ggml-hexagon/htp/htp_iface.idl b/ggml/src/ggml-hexagon/htp/htp_iface.idl new file mode 100644 index 0000000000000..9ebd937e46df9 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/htp_iface.idl @@ -0,0 +1,16 @@ +// FastRPC IDL interface for GGML HTP + +#ifndef HTP_IDL +#define HTP_IDL + +#include "AEEStdDef.idl" +#include "remote.idl" + +interface htp_iface : remote_handle64 { + AEEResult start(in uint32 sess_id, in uint64 dsp_queue_id, in uint32 n_hvx); + AEEResult stop(); + AEEResult enable_etm(); + AEEResult disable_etm(); +}; + +#endif /* HTP_IDL */ diff --git a/ggml/src/ggml-hexagon/htp/hvx-exp.c b/ggml/src/ggml-hexagon/htp/hvx-exp.c new file mode 100644 index 0000000000000..19f6795083c1d --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/hvx-exp.c @@ -0,0 +1,80 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +void hvx_exp_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, bool negate) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_exp_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + // assert((0 == unaligned_addr) || (0 == num_elems_whole)); + if ((1 == unaligned_addr) && (num_elems_whole 
!= 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_exp_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector vec_out = Q6_V_vzero(); + + if (0 == unaligned_loop) { + HVX_Vector * p_vec_in1 = (HVX_Vector *) src; + HVX_Vector * p_vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + if (true == negate) { + HVX_Vector neg_vec_in = hvx_vec_neg_fp32(*p_vec_in1++); + *p_vec_out++ = hvx_vec_exp_fp32(neg_vec_in); + } else { + *p_vec_out++ = hvx_vec_exp_fp32(*p_vec_in1++); + } + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + if (true == negate) { + HVX_Vector neg_vec_in = hvx_vec_neg_fp32(in); + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_exp_fp32(neg_vec_in); + } else { + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_exp_fp32(in); + } + } + } + + if (left_over > 0) { + const float * srcf = (float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + if (true == negate) { + HVX_Vector neg_vec_in = hvx_vec_neg_fp32(in); + + vec_out = hvx_vec_exp_fp32(neg_vec_in); + } else { + vec_out = hvx_vec_exp_fp32(in); + } + + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, vec_out); + } +} diff --git a/ggml/src/ggml-hexagon/htp/hvx-inverse.c b/ggml/src/ggml-hexagon/htp/hvx-inverse.c new file mode 100644 index 0000000000000..4cf588a8781f1 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/hvx-inverse.c @@ -0,0 +1,60 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +void hvx_inverse_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_inverse_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + // assert((0 == unaligned_addr) || (0 == num_elems_whole)); + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_inverse_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + if (0 == unaligned_loop) { + HVX_Vector * p_vec_in = (HVX_Vector *) src; + HVX_Vector * p_vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + *p_vec_out++ = hvx_vec_inverse_fp32(*p_vec_in++); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = hvx_vec_inverse_fp32(in); + } + } + + if (left_over > 0) { + const float * srcf = (float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + HVX_Vector out = hvx_vec_inverse_fp32(in); + + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, out); + } +} diff --git a/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c 
b/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c new file mode 100644 index 0000000000000..15ac64697c711 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c @@ -0,0 +1,49 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +#if 0 +// Reference algo used in hvx-utils +static void fast_sigmoid_f32(const float* restrict src, float* restrict dst, const int num_elems) +{ + const float c1 = 0.03138777; + const float c2 = 0.276281267; + const float c_log2f = 1.442695022; + + int32_t store_ints[32]; + float store_floats[3][32]; + + for (int i = 0; i < num_elems; i++) + { + float v = src0[i]; + + v *= c_log2f*0.5; + int intPart = (int)v; + float x = (v - intPart); + float xx = x * x; + float v1 = c_log2f + c2 * xx; + float v2 = x + xx * c1 * x; + float v3 = (v2 + v1); + *((int*)&v3) += intPart << 24; + float v4 = v2 - v1; + float v5 = v3 - v4; + float res = v3 / v5; + + dst[i] = res; + } +} +#endif diff --git a/ggml/src/ggml-hexagon/htp/hvx-utils.c b/ggml/src/ggml-hexagon/htp/hvx-utils.c new file mode 100644 index 0000000000000..d3599bc9c1276 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/hvx-utils.c @@ -0,0 +1,947 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "hvx-utils.h" + +#define htp_binary_ops_preamble \ + int step_of_4 = num_elems >> 7; \ + int step_of_2 = (num_elems - step_of_4 * VLEN_FP32 * 4) >> 6; \ + int step_of_1 = (num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2) >> 5; \ + int remaining = num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; \ + \ + const uint8_t * restrict src0_curr = src0; \ + const uint8_t * restrict src1_curr = src1; \ + uint8_t * restrict dst_curr = dst; + +void hvx_mul_f32(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || + (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_mul_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_mul_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; + HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, *vec_in2++); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in1 = 
*(HVX_UVector *) (src0 + i * SIZEOF_FP32); + HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in1, in2); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * src0f = (const float *) src0 + num_elems_whole; + const float * src1f = (const float *) src1 + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in1 = *(HVX_UVector *) src0f; + HVX_Vector in2 = *(HVX_UVector *) src1f; + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in1, in2); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +void hvx_mul_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + htp_binary_ops_preamble; + + for (int i = 0; i < step_of_4; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); + + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); + + HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); + + src0_curr += 4 * VLEN; + + HVX_Vector v3 = Q6_Vqf32_vmpy_VsfVsf(v3a, v3b); + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); + + *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); + + HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v4a, v4b); + + src1_curr += 4 * VLEN; + + *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); + + dst_curr += 4 * VLEN; + } + + for (int i = 0; i < step_of_2; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + src0_curr += 2 * VLEN; + + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); + + src1_curr += 2 * VLEN; + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + dst_curr += 2 * VLEN; + } + + for (int i = 0; i < step_of_1; i++) { + HVX_Vector va = *(HVX_Vector *) src0_curr; + + src0_curr += VLEN; + + HVX_Vector vb = *(HVX_Vector *) src1_curr; + + src1_curr += VLEN; + + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(va, vb); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); + + dst_curr += VLEN; + } + + if (remaining > 0) { + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); + hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); + } +} + +void hvx_mul_mul_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + const uint8_t * restrict src2, + uint8_t * restrict dst, + const int num_elems) { + const uint8_t * restrict src0_curr = src0; + const uint8_t * restrict src1_curr = src1; + const uint8_t * restrict src2_curr = src2; + uint8_t * restrict dst_curr = dst; + + int step_of_2 = num_elems >> 6; + int step_of_1 = (num_elems - step_of_2 * VLEN_FP32 * 2) >> 5; + int remaining = num_elems - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; + + for (int i = 0; i < step_of_2; i++) { + HVX_Vector v1a 
= *(HVX_Vector *) src0_curr; + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + HVX_Vector v1c = *(HVX_Vector *) src2_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1_ = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b); + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1_), v1c); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + HVX_Vector v2c = *(HVX_Vector *) (src2_curr + VLEN); + + src0_curr += 2 * VLEN; + + HVX_Vector v2_ = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b); + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v2_), v2c); + + src1_curr += 2 * VLEN; + src2_curr += 2 * VLEN; + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + dst_curr += 2 * VLEN; + } + for (int i = 0; i < step_of_1; i++) { + HVX_Vector va = *(HVX_Vector *) src0_curr; + src0_curr += VLEN; + + HVX_Vector vb = *(HVX_Vector *) src1_curr; + src1_curr += VLEN; + + HVX_Vector vc = *(HVX_Vector *) src2_curr; + src2_curr += VLEN; + + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(va, vb); + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), vc); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v2); + dst_curr += VLEN; + } + if (remaining > 0) { + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), *(HVX_Vector *) src2_curr); + hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v2)); + } +} + +void hvx_add_f32(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || + (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_add_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_add_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; + HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*vec_in1++, *vec_in2++); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32); + HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * src0f = (const float *) src0 + num_elems_whole; + const float * src1f = (const float *) src1 + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in1 = *(HVX_UVector *) src0f; + HVX_Vector in2 = *(HVX_UVector *) src1f; + + HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +void hvx_add_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + htp_binary_ops_preamble; + 
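+    // The preamble above splits num_elems into runs of 4, 2 and 1 full HVX
+    // vectors (VLEN_FP32 = 32 fp32 lanes each) plus a sub-vector remainder.
+    // This _opt path expects VLEN-aligned src0/src1/dst; the loops below
+    // interleave loads, Q6_Vqf32_vadd_VsfVsf and stores to keep the HVX
+    // pipeline busy, and the remainder is written with a masked
+    // hvx_vec_store_u.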
+ for (int i = 0; i < step_of_4; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); + + HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); + + HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); + + src0_curr += 4 * VLEN; + + HVX_Vector v3 = Q6_Vqf32_vadd_VsfVsf(v3a, v3b); + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); + + *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); + + HVX_Vector v4 = Q6_Vqf32_vadd_VsfVsf(v4a, v4b); + + src1_curr += 4 * VLEN; + + *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); + + dst_curr += 4 * VLEN; + } + for (int i = 0; i < step_of_2; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + src0_curr += 2 * VLEN; + + HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b); + + src1_curr += 2 * VLEN; + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + dst_curr += 2 * VLEN; + } + for (int i = 0; i < step_of_1; i++) { + HVX_Vector va = *(HVX_Vector *) src0_curr; + + src0_curr += VLEN; + + HVX_Vector vb = *(HVX_Vector *) src1_curr; + + src1_curr += VLEN; + + HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(va, vb); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); + + dst_curr += VLEN; + } + if (remaining > 0) { + HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); + hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); + } +} + +void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_add_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_add_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector val_vec = hvx_vec_splat_fp32(val); + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*vec_in1++, val_vec); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) 
dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +void hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_mul_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_mul_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector val_vec = hvx_vec_splat_fp32(val); + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, val_vec); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, val_vec); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, val_vec); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +void hvx_sub_f32(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) || + (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_sub_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_sub_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0; + HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, *vec_in2++); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32); + HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * src0f = (const float *) src0 + num_elems_whole; + const float * src1f = (const float *) src1 + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in1 = *(HVX_UVector *) src0f; 
+ HVX_Vector in2 = *(HVX_UVector *) src1f; + + HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +void hvx_sub_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems) { + htp_binary_ops_preamble; + + for (int i = 0; i < step_of_4; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN); + + HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN); + + HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN); + + src0_curr += 4 * VLEN; + + HVX_Vector v3 = Q6_Vqf32_vsub_VsfVsf(v3a, v3b); + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN); + + *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3); + + HVX_Vector v4 = Q6_Vqf32_vsub_VsfVsf(v4a, v4b); + + src1_curr += 4 * VLEN; + + *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4); + + dst_curr += 4 * VLEN; + } + for (int i = 0; i < step_of_2; i++) { + HVX_Vector v1a = *(HVX_Vector *) src0_curr; + + HVX_Vector v1b = *(HVX_Vector *) src1_curr; + + HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b); + + HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1); + + src0_curr += 2 * VLEN; + + HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b); + + src1_curr += 2 * VLEN; + + *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2); + + dst_curr += 2 * VLEN; + } + for (int i = 0; i < step_of_1; i++) { + HVX_Vector va = *(HVX_Vector *) src0_curr; + + src0_curr += VLEN; + + HVX_Vector vb = *(HVX_Vector *) src1_curr; + + src1_curr += VLEN; + + HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(va, vb); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v); + + dst_curr += VLEN; + } + if (remaining > 0) { + HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr); + hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v)); + } +} + +void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_sub_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_sub_scalar_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector val_vec = hvx_vec_splat_fp32(val); + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, val_vec); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int 
i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + if (0 == htp_is_aligned((void *) src, VLEN)) { + FARF(HIGH, "hvx_sum_of_squares_f32: unaligned address in hvx op, possibly slower execution\n"); + } + + assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole)); + + HVX_Vector * restrict vec_in1 = (HVX_Vector *) src; + + HVX_Vector sum_vec_acc = Q6_V_vsplat_R(0x00000000); + HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000); + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1, *vec_in1); + sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, v); + vec_in1++; + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + + HVX_Vector vec_left = *(HVX_UVector *) srcf; + + HVX_Vector vec_left_sq = Q6_Vqf32_vmpy_VsfVsf(vec_left, vec_left); + HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left_sq, zero_vec, left_over * SIZEOF_FP32); + + sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, vec_tmp); + } + + HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec_acc); + return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v)); +} + +float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if (0 == htp_is_aligned((void *) src, VLEN)) { + FARF(HIGH, "hvx_self_sum_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_self_sum_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector sum_vec = Q6_V_vsplat_R(0x00000000); + HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000); + + if (0 == unaligned_loop) { + HVX_Vector * vec_in = (HVX_Vector *) src; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, *vec_in++); + sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), *vec_in++); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), in); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + + HVX_Vector vec_left = *(HVX_UVector *) srcf; + HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left, zero_vec, left_over * SIZEOF_FP32); + // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, vec_tmp); + sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), vec_tmp); + } + + HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec); + return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v)); +} + +void hvx_scale_f32(const uint8_t * restrict src, uint8_t * 
restrict dst, const int num_elems, const float scale) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_scale_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_scale_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector scale_vec = hvx_vec_splat_fp32(scale); + + if (0 == unaligned_loop) { + HVX_Vector * vec_in1 = (HVX_Vector *) src; + HVX_Vector * vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, scale_vec); + *vec_out++ = Q6_Vsf_equals_Vqf32(v); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, scale_vec); + + *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, scale_vec); + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out)); + } +} + +float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems) { + int left_over = num_elems & (VLEN_FP32 - 1); + int num_elems_whole = num_elems - left_over; + + int unaligned_addr = 0; + int unaligned_loop = 0; + if (0 == htp_is_aligned((void *) src, VLEN)) { + FARF(HIGH, "hvx_self_max_f32: unaligned address in hvx op, possibly slower execution\n"); + unaligned_addr = 1; + } + + if ((1 == unaligned_addr) && (num_elems_whole != 0)) { + unaligned_loop = 1; + FARF(HIGH, "hvx_self_max_f32: unaligned loop in hvx op, possibly slower execution\n"); + } + + HVX_Vector vec_max = hvx_vec_splat_fp32(((const float *) src)[0]); + HVX_Vector vec_first = hvx_vec_splat_fp32(((const float *) src)[0]); + + if (0 == unaligned_loop) { + HVX_Vector * restrict vec_in = (HVX_Vector *) src; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, *vec_in++); + } + } else { + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32); + + vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, in); + } + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector temp = Q6_V_valign_VVR(in, vec_first, left_over * SIZEOF_FP32); + vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, temp); + } + + HVX_Vector v = hvx_vec_reduce_max_fp32(vec_max); + return hvx_vec_get_fp32(v); +} + +void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_min_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); + } + + assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole)); + + const float 
* src_f = (const float *) src; + + HVX_Vector vec_min = Q6_V_vsplat_R(val); + + HVX_Vector * restrict vec_in = (HVX_Vector *) src; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + vec_min = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++); + *vec_out++ = Q6_Vsf_equals_Vqf32(vec_min); + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + vec_min = Q6_Vsf_vmin_VsfVsf(vec_min, in); + + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(vec_min)); + } +} + +void hvx_clamp_scalar_f32(const uint8_t * restrict src, + const float limit_left, + const float limit_right, + uint8_t * restrict dst, + const int num_elems) { + size_t left_over = num_elems & (VLEN_FP32 - 1); + size_t num_elems_whole = num_elems - left_over; + + if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) { + FARF(HIGH, "hvx_clamp_scalar_f32: unaligned address in hvx op, possibly slower execution\n"); + } + + assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole)); + + HVX_Vector * restrict vec_in = (HVX_Vector *) src; + HVX_Vector * restrict vec_out = (HVX_Vector *) dst; + + HVX_Vector range_left = hvx_vec_splat_fp32(limit_left); + HVX_Vector range_right = hvx_vec_splat_fp32(limit_right); + + #pragma unroll(4) + for (int i = 0; i < num_elems_whole; i += VLEN_FP32) { + HVX_Vector in_vec = *vec_in++; + HVX_Vector temp_v = in_vec; + + HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right); + HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec); + + in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v); + in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, temp_v); + + *vec_out++ = Q6_Vsf_equals_Vqf32(in_vec); + } + + if (left_over > 0) { + const float * srcf = (const float *) src + num_elems_whole; + float * dstf = (float *) dst + num_elems_whole; + + HVX_Vector in = *(HVX_UVector *) srcf; + + HVX_Vector temp_v = in; + + HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in, range_right); + HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in); + + in = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v); + in = Q6_V_vmux_QVV(pred_cap_left, range_left, temp_v); + + hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(in)); + } +} diff --git a/ggml/src/ggml-hexagon/htp/hvx-utils.h b/ggml/src/ggml-hexagon/htp/hvx-utils.h new file mode 100644 index 0000000000000..b2ca8e88f464e --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/hvx-utils.h @@ -0,0 +1,998 @@ +#ifndef HVX_UTILS_H +#define HVX_UTILS_H + +#include "ops-utils.h" + +#include +#include + +#define SIZEOF_FP32 (4) +#define SIZEOF_FP16 (2) +#define VLEN (128) +#define VLEN_FP32 (VLEN / SIZEOF_FP32) +#define VLEN_FP16 (VLEN / SIZEOF_FP16) + +static inline HVX_Vector hvx_vec_splat_fp32(float i) { + union { + float f; + int32_t i; + } fp32 = { .f = i }; + + return Q6_V_vsplat_R(fp32.i); +} + +static inline void hvx_vec_store_u(void * addr, uint32_t n, HVX_Vector v) { + // Rotate as needed. 
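+    // hvx_vec_store_u() stores the first n bytes of v to a potentially
+    // unaligned address: the vector is rotated with vlalign so its bytes
+    // line up with the destination, byte predicates mask everything outside
+    // [addr, addr + n), and when the range crosses a 128-byte boundary the
+    // spill into the next vector is stored first.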
+ v = Q6_V_vlalign_VVR(v, v, (size_t) addr); + + uint32_t left_off = (size_t) addr & 127; + uint32_t right_off = left_off + n; + + HVX_VectorPred ql_not = Q6_Q_vsetq_R((size_t) addr); + HVX_VectorPred qr = Q6_Q_vsetq2_R(right_off); + + if (right_off > 128) { + Q6_vmem_QRIV(qr, (HVX_Vector *) addr + 1, v); + // all 1's + qr = Q6_Q_vcmp_eq_VbVb(v, v); + } + + ql_not = Q6_Q_or_QQn(ql_not, qr); + Q6_vmem_QnRIV(ql_not, (HVX_Vector *) addr, v); +} + +static inline void hvx_vec_store_a(void * ptr, size_t n, HVX_Vector v) { + assert((unsigned long) ptr % 128 == 0); + + HVX_VectorPred ql_not = Q6_Q_vsetq_R((size_t) ptr); + HVX_VectorPred qr = Q6_Q_vsetq2_R(n); + ql_not = Q6_Q_or_QQn(ql_not, qr); + Q6_vmem_QnRIV(ql_not, (HVX_Vector *) ptr, v); +} + +static inline HVX_Vector hvx_vec_repl4(HVX_Vector v) { + // vdelta control to replicate first 4 bytes across all elements + static const uint8_t __attribute__((aligned(128))) repl[128] = { + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x40, 0x40, 0x40, 0x40, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, + }; + + HVX_Vector ctrl = *(HVX_Vector *) repl; + return Q6_V_vdelta_VV(v, ctrl); +} + +// copy n fp16 elements : source and destination are aligned to HVX Vector (128) +static inline void hvx_copy_fp16_aa(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_Vector * restrict vdst = (HVX_Vector *) dst; + HVX_Vector * restrict vsrc = (HVX_Vector *) src; + + assert((unsigned long) dst % 128 == 0); + assert((unsigned long) src % 128 == 0); + + uint32_t nvec = n / 64; + uint32_t nloe = n % 64; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); + } +} + +// copy n fp16 elements : source is aligned, destination is potentially unaligned +static inline void hvx_copy_fp16_ua(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_UVector * restrict vdst = (HVX_UVector *) dst; + HVX_Vector * restrict vsrc = (HVX_Vector *) src; + + assert((unsigned long) src % 128 == 0); + + uint32_t nvec = n / 64; + uint32_t nloe = n % 64; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); + } +} + +// copy n fp16 elements : source is aligned, destination is potentially unaligned +static inline void hvx_copy_fp16_au(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_Vector * restrict vdst = (HVX_Vector *) dst; + HVX_UVector * restrict vsrc = (HVX_UVector *) src; + + assert((unsigned long) dst % 128 == 0); + + uint32_t nvec = n / 64; + uint32_t nloe = n % 64; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + 
HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(__fp16), v); + } +} + +// copy n fp32 elements : source and destination are aligned to HVX Vector (128) +static inline void hvx_copy_fp32_aa(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_Vector * restrict vdst = (HVX_Vector *) dst; + HVX_Vector * restrict vsrc = (HVX_Vector *) src; + + assert((unsigned long) dst % 128 == 0); + assert((unsigned long) src % 128 == 0); + + uint32_t nvec = n / 32; + uint32_t nloe = n % 32; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); + } +} + +// copy n fp32 elements : source is aligned, destination is unaligned +static inline void hvx_copy_fp32_ua(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_UVector * restrict vdst = (HVX_UVector *) dst; + HVX_Vector * restrict vsrc = (HVX_Vector *) src; + + assert((unsigned long) src % 128 == 0); + + uint32_t nvec = n / 32; + uint32_t nloe = n % 32; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); + } +} + +// copy n fp32 elements : source is unaligned, destination is aligned +static inline void hvx_copy_fp32_au(uint8_t * restrict dst, const uint8_t * restrict src, uint32_t n) { + HVX_Vector * restrict vdst = (HVX_Vector *) dst; + HVX_UVector * restrict vsrc = (HVX_UVector *) src; + + assert((unsigned long) dst % 128 == 0); + + uint32_t nvec = n / 32; + uint32_t nloe = n % 32; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + HVX_Vector v = vsrc[i]; + vdst[i] = v; + } + + if (nloe) { + HVX_Vector v = vsrc[i]; + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), v); + } +} + +// bcast 1 fp32 element from source to n fp32 elements in destination : destination is aligned +static inline void hvx_bcast_fp32_a(uint8_t * restrict dst, float elem, uint32_t n) { + HVX_Vector * restrict vdst = (HVX_Vector *) dst; + + HVX_Vector velem = hvx_vec_splat_fp32(elem); + + assert((unsigned long) dst % 128 == 0); + + uint32_t nvec = n / 32; + uint32_t nloe = n % 32; + + uint32_t i = 0; + + #pragma unroll(4) + for (; i < nvec; i++) { + vdst[i] = velem; + } + + if (nloe) { + hvx_vec_store_u((void *) &vdst[i], nloe * sizeof(float), velem); + } +} + +static __attribute__((always_inline)) int32_t is_in_one_chunk(void * addr, uint32_t n, uint32_t chunk_size) { + uint32_t left_off = (size_t) addr & (chunk_size - 1); + uint32_t right_off = left_off + n; + return right_off <= chunk_size; +} + +static void hvx_vec_dump_fp16_n(char * pref, HVX_Vector v, uint32_t n) { + union { + HVX_Vector v; + __fp16 d[64]; + } u = { .v = v }; + + const uint32_t n0 = n / 16; + const uint32_t n1 = n % 16; + int i = 0; + for (; i < n0; i++) { + htp_dump_fp16_line(pref, u.d + (16 * i), 16); + } + if (n1) { + htp_dump_fp16_line(pref, u.d + (16 * i), n1); + } +} + +static void hvx_vec_dump_fp16(char * pref, HVX_Vector v) { + hvx_vec_dump_fp16_n(pref, v, 64); +} + +static void hvx_vec_dump_fp32_n(char * pref, HVX_Vector v, uint32_t n) { + union { + HVX_Vector v; + float d[32]; + } u = { .v = v }; + + const uint32_t n0 = n / 16; + const uint32_t n1 = n % 16; + int i = 0; + for (; i < n0; i++) { + htp_dump_fp32_line(pref, u.d + 
(16 * i), 16); + } + if (n1) { + htp_dump_fp32_line(pref, u.d + (16 * i), n1); + } +} + +static void hvx_vec_dump_fp32_hmt(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + float d[32]; + } u = { .v = v }; + + FARF(HIGH, "%s: %.6f %.6f %.6f %.6f ... %.6f %.6f %.6f %.6f ... %.6f %.6f %.6f %.6f\n", pref, u.d[0], u.d[1], + u.d[2], u.d[3], u.d[12], u.d[13], u.d[14], u.d[15], u.d[28], u.d[29], u.d[30], u.d[31]); +} + +static void hvx_vec_dump_fp32(char * pref, HVX_Vector v) { + hvx_vec_dump_fp32_n(pref, v, 32); +} + +static void hvx_vec_dump_int32(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + int32_t d[32]; + } u = { .v = v }; + + for (int i = 0; i < 32 / 16; i++) { + htp_dump_int32_line(pref, u.d + (16 * i), 16); + } +} + +static void hvx_vec_dump_int32_hmt(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + int32_t d[32]; + } u = { .v = v }; + + FARF(HIGH, "%s: %d %d %d %d ... %d %d %d %d ... %d %d %d %d\n", pref, u.d[0], u.d[1], u.d[2], u.d[3], u.d[12], + u.d[13], u.d[14], u.d[15], u.d[28], u.d[29], u.d[30], u.d[31]); +} + +static void hvx_vec_dump_int8_hmt(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + int8_t d[128]; + } u = { .v = v }; + + FARF(HIGH, "%s: %d %d %d %d ... %d %d %d %d ... %d %d %d %d\n", pref, u.d[0], u.d[1], u.d[2], u.d[3], u.d[60], + u.d[61], u.d[62], u.d[63], u.d[124], u.d[125], u.d[126], u.d[127]); +} + +static void hvx_vec_dump_int8(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + int8_t d[128]; + } u = { .v = v }; + + for (int i = 0; i < 128 / 16; i++) { + htp_dump_int8_line(pref, u.d + (16 * i), 16); + } +} + +static void hvx_vec_dump_uint8(char * pref, HVX_Vector v) { + union { + HVX_Vector v; + uint8_t d[128]; + } u = { .v = v }; + + for (int i = 0; i < 128 / 16; i++) { + htp_dump_uint8_line(pref, u.d + (16 * i), 16); + } +} + +static bool hvx_vec_eq(HVX_Vector v0, HVX_Vector v1, size_t n) { + typedef union { + HVX_Vector v; + int8_t d[128]; + } U; + + U u0 = { .v = v0 }; + U u1 = { .v = v1 }; + + for (int i = 0; i < n; i++) { + if (u0.d[i] != u1.d[i]) { + return false; + } + } + + return true; +} + +static inline float hvx_vec_get_fp32(HVX_Vector v) { + float __attribute__((aligned(128))) x; + hvx_vec_store_a(&x, 4, v); + return x; +} + +static inline HVX_Vector hvx_vec_int32_reduce_sum_n(HVX_Vector in, unsigned int n) { + unsigned int total = n * 4; // total vec nbytes + unsigned int width = 4; // int32 + + HVX_Vector sum = in, sum_t; + while (width < total) { + sum_t = Q6_V_vror_VR(sum, width); // rotate right + sum = Q6_Vw_vadd_VwVw(sum_t, sum); // elementwise sum + width = width << 1; + } + return sum; +} + +static inline HVX_Vector hvx_vec_int32_reduce_sum(HVX_Vector in) { + return hvx_vec_int32_reduce_sum_n(in, 32); +} + +static inline HVX_Vector hvx_vec_qf32_reduce_sum_n(HVX_Vector in, unsigned int n) { + unsigned int total = n * 4; // total vec nbytes + unsigned int width = 4; // fp32 nbytes + + HVX_Vector sum = in, sum_t; + while (width < total) { + sum_t = Q6_V_vror_VR(Q6_Vsf_equals_Vqf32(sum), width); // rotate right + sum = Q6_Vqf32_vadd_Vqf32Vsf(sum, sum_t); // elementwise sum + width = width << 1; + } + return sum; +} + +static inline HVX_Vector hvx_vec_qf32_reduce_sum(HVX_Vector in) { + return hvx_vec_qf32_reduce_sum_n(in, 32); +} + +static inline HVX_Vector hvx_vec_fp32_reduce_sum_n(HVX_Vector in, unsigned int n) { + unsigned int total = n * 4; // total vec nbytes + unsigned int width = 4; // fp32 nbytes + + HVX_Vector sum = in, sum_t; + while (width < total) { + sum_t = Q6_V_vror_VR(sum, width); // rotate 
right + sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(sum, sum_t)); // elementwise sum + width = width << 1; + } + return sum; +} + +static inline HVX_Vector hvx_vec_fp32_reduce_sum(HVX_Vector in) { + return hvx_vec_fp32_reduce_sum_n(in, 32); +} + +static inline HVX_Vector hvx_vec_reduce_max_fp16(HVX_Vector in) { + unsigned total = 128; // total vec nbytes + unsigned width = 2; // fp16 nbytes + + HVX_Vector _max = in, _max_t; + while (width < total) { + _max_t = Q6_V_vror_VR(_max, width); // rotate right + _max = Q6_Vhf_vmax_VhfVhf(_max_t, _max); // elementwise max + width = width << 1; + } + + return _max; +} + +static inline HVX_Vector hvx_vec_reduce_max2_fp16(HVX_Vector in, HVX_Vector _max) { + unsigned total = 128; // total vec nbytes + unsigned width = 2; // fp32 nbytes + + HVX_Vector _max_t; + + _max = Q6_Vhf_vmax_VhfVhf(in, _max); + while (width < total) { + _max_t = Q6_V_vror_VR(_max, width); // rotate right + _max = Q6_Vhf_vmax_VhfVhf(_max_t, _max); // elementwise max + width = width << 1; + } + + return _max; +} + +static inline HVX_Vector hvx_vec_reduce_max_fp32(HVX_Vector in) { + unsigned total = 128; // total vec nbytes + unsigned width = 4; // fp32 nbytes + + HVX_Vector _max = in, _max_t; + while (width < total) { + _max_t = Q6_V_vror_VR(_max, width); // rotate right + _max = Q6_Vsf_vmax_VsfVsf(_max_t, _max); // elementwise max + width = width << 1; + } + + return _max; +} + +static inline HVX_Vector hvx_vec_reduce_max2_fp32(HVX_Vector in, HVX_Vector _max) { + unsigned total = 128; // total vec nbytes + unsigned width = 4; // fp32 nbytes + + HVX_Vector _max_t; + + _max = Q6_Vsf_vmax_VsfVsf(in, _max); + while (width < total) { + _max_t = Q6_V_vror_VR(_max, width); // rotate right + _max = Q6_Vsf_vmax_VsfVsf(_max_t, _max); // elementwise max + width = width << 1; + } + + return _max; +} + +static inline HVX_Vector hvx_vec_abs_fp16(HVX_Vector v) { + // abs by clearing the fp16 sign bit + HVX_Vector mask = Q6_Vh_vsplat_R(0x7fff); + return Q6_V_vand_VV(v, mask); +} + +static inline HVX_Vector hvx_vec_neg_fp16(HVX_Vector v) { + // neg by setting the fp16 sign bit + HVX_Vector mask = Q6_Vh_vsplat_R(0x8000); + return Q6_V_vor_VV(v, mask); +} + +static inline HVX_Vector hvx_vec_abs_fp32(HVX_Vector v) { + // abs by clearing the fp32 sign bit + HVX_Vector mask = Q6_V_vsplat_R(0x7fffffff); + return Q6_V_vand_VV(v, mask); +} + +static inline HVX_Vector hvx_vec_neg_fp32(HVX_Vector v) { +#if __HTP_ARCH__ > 75 + return Q6_Vsf_vfneg_Vsf(v); +#else + // neg by setting the fp32 sign bit + HVX_Vector mask = Q6_V_vsplat_R(0x80000000); + return Q6_V_vor_VV(v, mask); +#endif // __HTP_ARCH__ > 75 +} + +// ==================================================== +// FUNCTION: 1/(x+1) y(0) = 1, y(0.5) = 0.6667, y(1) = 0.5 +// Order:3; continuity: True; Ends forced: True +// Mode: unsigned; Result fractional bits: 14 +// Peak Error: 1.1295e-04 Rms Error: 2.8410e-05 Mean Error: 1.1370e-05 +// 32769 -32706 31252 -10589 +// 32590 -30635 22793 -4493 +// 32066 -27505 16481 -2348 +// 31205 -24054 11849 -1306 + +static inline HVX_Vector hvx_vec_recip_xp1_O3_unsigned(HVX_Vector vx) { + // input is 0..0xffff representing 0.0 .. 1.0 + HVX_Vector p; + p = Q6_Vh_vlut4_VuhPh(vx, 0xFAE6F6D4EE73D6A3ull); + p = Q6_Vh_vmpa_VhVhVuhPuh_sat(p, vx, 0x2E49406159097A14ull); + p = Q6_Vh_vmps_VhVhVuhPuh_sat(p, vx, 0x5DF66B7177AB7FC2ull); + p = Q6_Vh_vmpa_VhVhVuhPuh_sat(p, vx, 0x79E57D427F4E8001ull); + return p; // signed result, 14 fractional bits +} + +// Find reciprocal of fp16. 
+// (1) first, convert to fp32, multiplying by 1.0; this is done to +// handle denormals. Ignoring sign and zero, result should be at +// least 5.9604645e-08 (32-bit code 0x33800000) and at most 131008 (0x47ffe000) +// (exponent in range [103,143]) +// (2) extract the mantissa into 16-bit unsigned; find reciprocal using a fitted poly +// (3) put this, along with '253-exp' (exp from (1)) together to make an qf32 +// (4) convert that to fp16 +// (5) put sign back in. Also, if the original value (w/o sign) was <0x81, replace +// the result with the max value. +static inline HVX_Vector hvx_vec_inverse_fp16(HVX_Vector vals) { + HVX_Vector em_mask = Q6_Vh_vsplat_R(0x7FFF); + HVX_Vector avals = Q6_V_vand_VV(vals, em_mask); + HVX_VectorPred is_neg = Q6_Q_vcmp_gt_VhVh(avals, vals); + // is too small to 1/x ? for 'standard' fp16, this would be 0x101 + HVX_VectorPred is_small = Q6_Q_vcmp_gt_VhVh(Q6_Vh_vsplat_R(0x101), avals); + + HVX_VectorPair to_qf32 = Q6_Wqf32_vmpy_VhfVhf(avals, Q6_Vh_vsplat_R(0x3C00)); // *1.0 + HVX_Vector to_f32_0 = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(to_qf32)); + HVX_Vector to_f32_1 = Q6_Vsf_equals_Vqf32(Q6_V_hi_W(to_qf32)); + + // bits 22..13 contain the mantissa now (w/o hidden bit); move to bit 14..5 of a 16-bit vector + HVX_Vector mant_u16 = Q6_Vh_vshuffo_VhVh(Q6_Vw_vasl_VwR(to_f32_1, 9), Q6_Vw_vasl_VwR(to_f32_0, 9)); + // likewise extract the upper 16 from each, containing the exponents in range 103..142 + HVX_Vector exp_u16 = Q6_Vh_vshuffo_VhVh(to_f32_1, to_f32_0); + //Get exponent in IEEE 32-bit representation + exp_u16 = Q6_Vuh_vlsr_VuhR(exp_u16, 7); + + // so, mant_u16 contains an unbiased mantissa in upper 10 bits of each u16 lane + // We can consider it to be x-1.0, with 16 fractional bits, where 'x' is in range [1.0,2.0) + // Use poly to transform to 1/x, with 14 fractional bits + // + HVX_Vector rm = hvx_vec_recip_xp1_O3_unsigned(mant_u16); + + HVX_Vector vcl0 = Q6_Vuh_vcl0_Vuh(rm); //count leading zeros + + // Get mantissa for 16-bit represenation + HVX_Vector mant_recip = Q6_V_vand_VV(Q6_Vh_vasr_VhR(Q6_Vh_vasl_VhVh(rm, vcl0), 5), Q6_Vh_vsplat_R(0x03FF)); + + //Compute Reciprocal Exponent + HVX_Vector exp_recip = + Q6_Vh_vsub_VhVh(Q6_Vh_vsub_VhVh(Q6_Vh_vsplat_R(254), exp_u16), Q6_Vh_vsub_VhVh(vcl0, Q6_Vh_vsplat_R(1))); + //Convert it for 16-bit representation + exp_recip = Q6_Vh_vadd_VhVh_sat(Q6_Vh_vsub_VhVh(exp_recip, Q6_Vh_vsplat_R(127)), Q6_Vh_vsplat_R(15)); + exp_recip = Q6_Vh_vasl_VhR(exp_recip, 10); + + //Merge exponent and mantissa for reciprocal + HVX_Vector recip = Q6_V_vor_VV(exp_recip, mant_recip); + // map 'small' inputs to standard largest value 0x7bff + recip = Q6_V_vmux_QVV(is_small, Q6_Vh_vsplat_R(0x7bff), recip); + // add sign back + recip = Q6_V_vandor_VQR(recip, is_neg, 0x80008000); + return recip; +} + +#define IEEE_VSF_EXPLEN (8) +#define IEEE_VSF_EXPBIAS (127) +#define IEEE_VSF_EXPMASK (0xFF) +#define IEEE_VSF_MANTLEN (23) +#define IEEE_VSF_MANTMASK (0x7FFFFF) +#define IEEE_VSF_MIMPMASK (0x800000) + +static inline HVX_Vector hvx_vec_truncate_fp32(HVX_Vector in_vec) { + HVX_Vector mask_mant_v = Q6_V_vsplat_R(IEEE_VSF_MANTMASK); + HVX_Vector mask_impl_v = Q6_V_vsplat_R(IEEE_VSF_MIMPMASK); + HVX_Vector const_zero_v = Q6_V_vzero(); + + HVX_VectorPred q_negative = Q6_Q_vcmp_gt_VwVw(const_zero_v, in_vec); + + HVX_Vector expval_v = in_vec >> IEEE_VSF_MANTLEN; + expval_v &= IEEE_VSF_EXPMASK; + expval_v -= IEEE_VSF_EXPBIAS; + + // negative exp == fractional value + HVX_VectorPred q_negexp = Q6_Q_vcmp_gt_VwVw(const_zero_v, expval_v); + + HVX_Vector rshift_v 
= IEEE_VSF_MANTLEN - expval_v; // fractional bits - exp shift + + HVX_Vector mant_v = in_vec & mask_mant_v; // obtain mantissa + HVX_Vector vout = Q6_Vw_vadd_VwVw(mant_v, mask_impl_v); // add implicit 1.0 + + vout = Q6_Vw_vasr_VwVw(vout, rshift_v); // shift to obtain truncated integer + vout = Q6_V_vmux_QVV(q_negexp, const_zero_v, vout); // expval<0 -> 0 + + HVX_Vector neg_vout = -vout; + + vout = Q6_V_vmux_QVV(q_negative, neg_vout, vout); // handle negatives + + return (vout); +} + +static inline HVX_Vector hvx_vec_floor_fp32(HVX_Vector in_vec) { + HVX_Vector mask_mant_v = Q6_V_vsplat_R(IEEE_VSF_MANTMASK); + HVX_Vector mask_impl_v = Q6_V_vsplat_R(IEEE_VSF_MIMPMASK); + HVX_Vector const_mnlen_v = Q6_V_vsplat_R(IEEE_VSF_MANTLEN); + HVX_Vector const_zero_v = Q6_V_vzero(); + HVX_Vector const_negone_v = Q6_V_vsplat_R(0xbf800000); // -1 IEEE vsf + + HVX_VectorPred q_negative = Q6_Q_vcmp_gt_VwVw(const_zero_v, in_vec); + + HVX_Vector expval_v = in_vec >> IEEE_VSF_MANTLEN; + expval_v &= IEEE_VSF_EXPMASK; + expval_v -= IEEE_VSF_EXPBIAS; + + HVX_VectorPred q_negexp = Q6_Q_vcmp_gt_VwVw(const_zero_v, expval_v); + HVX_VectorPred q_expltmn = Q6_Q_vcmp_gt_VwVw(const_mnlen_v, expval_v); + HVX_VectorPred q_negexp_pos = Q6_Q_vcmp_gtand_QVwVw(q_negexp, in_vec, const_zero_v); + HVX_VectorPred q_negexp_neg = Q6_Q_vcmp_gtand_QVwVw(q_negexp, const_zero_v, in_vec); + + // if expval < 0 (q_negexp) // <0, floor is 0 + // if vin > 0 + // floor = 0 + // if vin < 0 + // floor = -1 + // if expval < mant_len (q_expltmn) // >0, but fraction may exist + // get sign (q_negative) + // mask >> expval // fraction bits to mask off + // vout = ~(mask) // apply mask to remove fraction + // if (qneg) // negative floor is one less (more, sign bit for neg) + // vout += ((impl_mask) >> expval) + // if (mask && vin) + // vout = vin + // else // already an integer + // ; // no change + + // compute floor + mask_mant_v >>= expval_v; + HVX_Vector neg_addin_v = mask_impl_v >> expval_v; + HVX_Vector vout_neg_addin = Q6_Vw_vadd_VwVw(in_vec, neg_addin_v); + HVX_Vector vout = Q6_V_vmux_QVV(q_negative, vout_neg_addin, in_vec); + + HVX_Vector mask_chk_v = Q6_V_vand_VV(in_vec, mask_mant_v); // chk if bits set + HVX_VectorPred q_integral = Q6_Q_vcmp_eq_VwVw(const_zero_v, mask_chk_v); + + HVX_Vector not_mask_v = Q6_V_vnot_V(mask_mant_v); // frac bits to clear + HVX_Vector vfrfloor_v = Q6_V_vand_VV(vout, not_mask_v); // clear frac bits + + vout = in_vec; + vout = Q6_V_vmux_QVV(q_expltmn, vfrfloor_v, vout); // expval0 -> 0 + vout = Q6_V_vmux_QVV(q_negexp_neg, const_negone_v, vout); // expval<0 x<0 -> -1 + + return vout; +} + +static inline HVX_Vector hvx_vec_i16_from_hf_rnd_sat(HVX_Vector vin) { + // This looks complicated. + // Ideally should just be Q6_Vh_equals_Vhf(vin) + // but that instruction does not do proper rounding. + + // convert to qf32, multiplying by 1.0 in the process. + HVX_VectorPair v32 = Q6_Wqf32_vmpy_VhfVhf(vin, Q6_Vh_vsplat_R(0x3C00)); + + // 'in-range' values are +/32752. + // add 192K to it, convert to sf + HVX_Vector v192K = Q6_V_vsplat_R(0x48400000); + HVX_Vector vsf_0 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(Q6_V_lo_W(v32), v192K)); + HVX_Vector vsf_1 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(Q6_V_hi_W(v32), v192K)); + + // for in-range cases, result is {163858... 229360} so the exponent is always 144. + // if we extract bits 21..0 as a signed quantity, and round 6 bits off, that will be the answer. + // Start by <<10 to get the final 'sign' bit in bit 15... 
+ vsf_0 = Q6_Vw_vasl_VwR(vsf_0, 10); + vsf_1 = Q6_Vw_vasl_VwR(vsf_1, 10); + + // now round down to 16 + return Q6_Vh_vround_VwVw_sat(vsf_1, vsf_0); +} + +static inline HVX_Vector hvx_vec_inverse_fp32(HVX_Vector v_sf) { + HVX_Vector inv_aprox_sf = Q6_V_vsplat_R(0x7EEEEBB3); + HVX_Vector two_sf = hvx_vec_splat_fp32(2.0); + + // First approximation + HVX_Vector i_sf = Q6_Vw_vsub_VwVw(inv_aprox_sf, v_sf); + + HVX_Vector r_qf; + + // Refine + r_qf = Q6_Vqf32_vmpy_VsfVsf( + i_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(i_sf, v_sf))))); + r_qf = Q6_Vqf32_vmpy_Vqf32Vqf32( + r_qf, Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(r_qf), v_sf)))); + r_qf = Q6_Vqf32_vmpy_Vqf32Vqf32( + r_qf, Q6_Vqf32_vsub_VsfVsf(two_sf, Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(r_qf), v_sf)))); + + return Q6_Vsf_equals_Vqf32(r_qf); +} + +#define FAST_SIGMOID_LOG2F (0x3fb8aa3b) // 1.442695022 +#define FAST_SIGMOID_C1 (0x3d009076) // 0.03138777 +#define FAST_SIGMOID_C2 (0x3e8d74bd) // 0.276281267 +#define FAST_SIGMOID_C3 (0x3f000000) // 0.5 + +static inline HVX_Vector hvx_vec_fast_sigmoid_fp32(HVX_Vector v) { + v = Q6_Vqf32_vmpy_VsfVsf(v, Q6_V_vsplat_R(FAST_SIGMOID_LOG2F)); + v = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v), Q6_V_vsplat_R(FAST_SIGMOID_C3)); + + HVX_Vector in_int = hvx_vec_truncate_fp32(Q6_Vsf_equals_Vqf32(v)); + HVX_Vector x = Q6_Vqf32_vsub_Vqf32Vsf(v, Q6_Vsf_equals_Vw(in_int)); + HVX_Vector xx = Q6_Vqf32_vmpy_Vqf32Vqf32(x, x); + + HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(xx), Q6_V_vsplat_R(FAST_SIGMOID_C2)); + v1 = Q6_Vqf32_vadd_Vqf32Vsf(v1, Q6_V_vsplat_R(FAST_SIGMOID_LOG2F)); + + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(x), Q6_V_vsplat_R(FAST_SIGMOID_C1)); + v2 = Q6_Vqf32_vmpy_Vqf32Vqf32(v2, xx); + v2 = Q6_Vqf32_vadd_Vqf32Vqf32(v2, x); + + HVX_Vector v3 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vqf32(v2, v1)); + HVX_Vector v3_exponent = Q6_Vw_vasl_VwR(v3, 1); + v3_exponent = Q6_Vuw_vlsr_VuwR(v3_exponent, 24); + v3_exponent = Q6_Vw_vadd_VwVw(in_int, v3_exponent); + v3 = Q6_Vw_vaslacc_VwVwR(v3, in_int, 24); + + HVX_Vector v4 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_Vqf32Vqf32(v2, v1)); + HVX_Vector v5 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vsub_VsfVsf(v3, v4)); + + HVX_Vector res = hvx_vec_inverse_fp32(v5); + res = Q6_Vqf32_vmpy_VsfVsf(v3, res); + + return Q6_Vsf_equals_Vqf32(res); +} + +#define EXP_COEFF_5 (0x39506967) // 0.000198757 = 1/(7!) +#define EXP_COEFF_4 (0x3AB743CE) // 0.0013982 = 1/(6!) +#define EXP_COEFF_3 (0x3C088908) // 0.00833345 = 1/(5!) +#define EXP_COEFF_2 (0x3D2AA9C1) // 0.416658 = 1/(4!) +#define EXP_COEFF_1 (0x3E2AAAAA) // 0.16666667 = 1/(3!) +#define EXP_COEFF_0 (0x3F000000) // 0.5 = 1/(2!) 
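+// Editorial reference (hypothetical helper, not part of the original kernels): a plain
+// scalar sketch of the range-reduction scheme that hvx_vec_exp_fp32() below implements
+// with the EXP_* constants. It assumes the input has already been clamped the way the
+// vector code clamps it (to +/-20.0) so that the exponent adjustment cannot overflow.
+static inline float htp_ref_exp_f32(float x) {
+    // f = floor(x * log2(e)); eps = x - f * ln(2); exp(x) = 2^f * exp(eps)
+    float t = x * 1.4426950408f;  // x * log2(e)
+    int   k = (int) t;
+    if (t < 0.0f && (float) k != t) {
+        k--;  // (int) truncates toward zero; adjust to get floor() for negative t
+    }
+    float eps = x - (float) k * 0.6931471805f;
+
+    // exp(eps) ~= 1 + eps + eps^2/2! + ... + eps^7/7! (the EXP_COEFF_* values above)
+    float p = 1.0f / 5040.0f;
+    p = p * eps + 1.0f / 720.0f;
+    p = p * eps + 1.0f / 120.0f;
+    p = p * eps + 1.0f / 24.0f;
+    p = p * eps + 1.0f / 6.0f;
+    p = p * eps + 0.5f;
+    p = (p * eps * eps) + eps + 1.0f;
+
+    // scale by 2^k by adding k to the IEEE-754 exponent field (p is in [1, 2), k is small)
+    union { float f; int32_t i; } u = { .f = p };
+    u.i += k << IEEE_VSF_MANTLEN;
+    return u.f;
+}
+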
+#define EXP_LOGN2 (0x3F317218) // ln(2) = 0.6931471805 +#define EXP_LOG2E (0x3FB8AA3B) // log2(e) = 1/ln(2) = 1.4426950408 +#define EXP_ONE (0x3f800000) // 1.0 +#define EXP_RANGE_R (0x41a00000) // 20.0 +#define EXP_RANGE_L (0xc1a00000) // -20.0 + +static inline HVX_Vector hvx_vec_exp_fp32(HVX_Vector in_vec) { + HVX_Vector z_qf32_v; + HVX_Vector x_v; + HVX_Vector x_qf32_v; + HVX_Vector y_v; + HVX_Vector k_v; + HVX_Vector f_v; + HVX_Vector epsilon_v; + HVX_Vector log2e = Q6_V_vsplat_R(EXP_LOG2E); + HVX_Vector logn2 = Q6_V_vsplat_R(EXP_LOGN2); + HVX_Vector E_const; + HVX_Vector zero_v = Q6_V_vzero(); + + // exp(x) is approximated as follows: + // f = floor(x/ln(2)) = floor(x*log2(e)) + // epsilon = x - f*ln(2) + // exp(x) = exp(epsilon+f*ln(2)) + // = exp(epsilon)*exp(f*ln(2)) + // = exp(epsilon)*2^f + // + // Since epsilon is close to zero, it can be approximated with its Taylor series: + // exp(x) ~= 1+x+x^2/2!+x^3/3!+...+x^n/n!+... + // Preserving the first eight elements, we get: + // exp(x) ~= 1+x+e0*x^2+e1*x^3+e2*x^4+e3*x^5+e4*x^6+e5*x^7 + // = 1+x+(E0+(E1+(E2+(E3+(E4+E5*x)*x)*x)*x)*x)*x^2 + + HVX_Vector temp_v = in_vec; + + // Clamp inputs to (-20.0, 20.0) + HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, Q6_V_vsplat_R(EXP_RANGE_R)); + HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(Q6_V_vsplat_R(EXP_RANGE_L), in_vec); + + in_vec = Q6_V_vmux_QVV(pred_cap_right, Q6_V_vsplat_R(EXP_RANGE_R), temp_v); + in_vec = Q6_V_vmux_QVV(pred_cap_left, Q6_V_vsplat_R(EXP_RANGE_L), temp_v); + + epsilon_v = Q6_Vqf32_vmpy_VsfVsf(log2e, in_vec); + epsilon_v = Q6_Vsf_equals_Vqf32(epsilon_v); + + // f_v is the floating point result and k_v is the integer result + f_v = hvx_vec_floor_fp32(epsilon_v); + k_v = hvx_vec_truncate_fp32(f_v); + + x_qf32_v = Q6_Vqf32_vadd_VsfVsf(in_vec, zero_v); + + // x = x - f_v * logn2; + epsilon_v = Q6_Vqf32_vmpy_VsfVsf(f_v, logn2); + x_qf32_v = Q6_Vqf32_vsub_Vqf32Vqf32(x_qf32_v, epsilon_v); + // normalize before every QFloat's vmpy + x_qf32_v = Q6_Vqf32_vadd_Vqf32Vsf(x_qf32_v, zero_v); + + // z = x * x; + z_qf32_v = Q6_Vqf32_vmpy_Vqf32Vqf32(x_qf32_v, x_qf32_v); + z_qf32_v = Q6_Vqf32_vadd_Vqf32Vsf(z_qf32_v, zero_v); + + x_v = Q6_Vsf_equals_Vqf32(x_qf32_v); + + // y = E4 + E5 * x; + E_const = Q6_V_vsplat_R(EXP_COEFF_5); + y_v = Q6_Vqf32_vmpy_VsfVsf(E_const, x_v); + E_const = Q6_V_vsplat_R(EXP_COEFF_4); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = E3 + y * x; + E_const = Q6_V_vsplat_R(EXP_COEFF_3); + y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = E2 + y * x; + E_const = Q6_V_vsplat_R(EXP_COEFF_2); + y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = E1 + y * x; + E_const = Q6_V_vsplat_R(EXP_COEFF_1); + y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = E0 + y * x; + E_const = Q6_V_vsplat_R(EXP_COEFF_0); + y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, x_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, E_const); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = x + y * z; + y_v = Q6_Vqf32_vmpy_Vqf32Vqf32(y_v, z_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vqf32(y_v, x_qf32_v); + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, zero_v); + + // y = y + 1.0; + y_v = Q6_Vqf32_vadd_Vqf32Vsf(y_v, Q6_V_vsplat_R(EXP_ONE)); + + // insert exponents + // y = 
ldexpf(y, k); + // y_v += k_v; // qf32 + // modify exponent + + y_v = Q6_Vsf_equals_Vqf32(y_v); + + // add k_v to the exponent of y_v + HVX_Vector y_v_exponent = Q6_Vw_vasl_VwR(y_v, 1); + + y_v_exponent = Q6_Vuw_vlsr_VuwR(y_v_exponent, IEEE_VSF_MANTLEN + 1); + y_v_exponent = Q6_Vw_vadd_VwVw(k_v, y_v_exponent); + + // exponent cannot be negative; if overflow is detected, result is set to zero + HVX_VectorPred qy_v_negative_exponent = Q6_Q_vcmp_gt_VwVw(zero_v, y_v_exponent); + + y_v = Q6_Vw_vaslacc_VwVwR(y_v, k_v, IEEE_VSF_MANTLEN); + + y_v = Q6_V_vmux_QVV(qy_v_negative_exponent, zero_v, y_v); + + return y_v; +} + +#define RSQRT_CONST 0x5f3759df // Constant for fast inverse square root calculation +#define RSQRT_ONE_HALF 0x3f000000 // 0.5 +#define RSQRT_THREE_HALVES 0x3fc00000 // 1.5 + +static inline HVX_Vector hvx_vec_rsqrt_fp32(HVX_Vector in_vec) { + //Algorithm : + // x2 = input*0.5 + // y = * (long *) &input + // y = 0x5f3759df - (y>>2) + // y = y*(threehalfs - x2*y*y) + + HVX_Vector rsqrtconst = Q6_V_vsplat_R(RSQRT_CONST); + HVX_Vector onehalf = Q6_V_vsplat_R(RSQRT_ONE_HALF); + HVX_Vector threehalfs = Q6_V_vsplat_R(RSQRT_THREE_HALVES); + + HVX_Vector x2, y, ypower2, temp; + + x2 = Q6_Vqf32_vmpy_VsfVsf(in_vec, onehalf); + x2 = Q6_Vqf32_vadd_Vqf32Vsf(x2, Q6_V_vzero()); + + y = Q6_Vw_vasr_VwR(in_vec, 1); + y = Q6_Vw_vsub_VwVw(rsqrtconst, y); + + // 1st iteration + ypower2 = Q6_Vqf32_vmpy_VsfVsf(y, y); + ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); + temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); + temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); + temp = Q6_Vqf32_vmpy_VsfVsf(y, Q6_Vsf_equals_Vqf32(temp)); + + // 2nd iteration + y = Q6_Vqf32_vadd_Vqf32Vsf(temp, Q6_V_vzero()); + ypower2 = Q6_Vqf32_vmpy_Vqf32Vqf32(y, y); + ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); + temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); + temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); + temp = Q6_Vqf32_vmpy_Vqf32Vqf32(y, temp); + + // 3rd iteration + y = Q6_Vqf32_vadd_Vqf32Vsf(temp, Q6_V_vzero()); + ypower2 = Q6_Vqf32_vmpy_Vqf32Vqf32(y, y); + ypower2 = Q6_Vqf32_vadd_Vqf32Vsf(ypower2, Q6_V_vzero()); + temp = Q6_Vqf32_vmpy_Vqf32Vqf32(x2, ypower2); + temp = Q6_Vqf32_vsub_VsfVsf(threehalfs, Q6_Vsf_equals_Vqf32(temp)); + temp = Q6_Vqf32_vmpy_Vqf32Vqf32(y, temp); + + return Q6_Vsf_equals_Vqf32(temp); +} + +static inline void hvx_fast_sigmoid_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems) { + int step_of_1 = num_elems >> 5; + int remaining = num_elems - step_of_1 * VLEN_FP32; + + assert(remaining == 0); + + const HVX_Vector * restrict v_src = (HVX_Vector *) src; + HVX_Vector * restrict v_dst = (HVX_Vector *) dst; + + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + v_dst[i] = hvx_vec_fast_sigmoid_fp32(v_src[i]); + } +} + +float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems); +void hvx_mul_f32(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_mul_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_mul_mul_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + const uint8_t * restrict src2, + uint8_t * restrict dst, + const int num_elems); +void hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); +void hvx_add_f32(const uint8_t * restrict src0, + const uint8_t * 
restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_add_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); +void hvx_sub_f32(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_sub_f32_opt(const uint8_t * restrict src0, + const uint8_t * restrict src1, + uint8_t * restrict dst, + const int num_elems); +void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); +void hvx_scale_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, const float scale); +void hvx_inverse_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems); +void hvx_sigmoid_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems); +void hvx_exp_f32(const uint8_t * restrict src, uint8_t * restrict dst, const int num_elems, bool negate); +float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems); +float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems); +void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems); +void hvx_clamp_scalar_f32(const uint8_t * restrict src, + const float limit_left, + const float limit_right, + uint8_t * restrict dst, + const int num_elems); + +#endif /* HVX_UTILS_H */ diff --git a/ggml/src/ggml-hexagon/htp/main.c b/ggml/src/ggml-hexagon/htp/main.c new file mode 100644 index 0000000000000..e35ea3b0211c8 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/main.c @@ -0,0 +1,945 @@ +#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" +#pragma clang diagnostic ignored "-Wunused-function" + +#define FARF_ERROR 1 +#define FARF_HIGH 1 +#define FARF_MEDIUM 0 +#define FARF_LOW 0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "ops-utils.h" +#include "worker-pool.h" + +AEEResult htp_iface_open(const char * uri, remote_handle64 * handle) { + struct htp_context * ctx; + int err = 0; + + ctx = calloc(1, sizeof(*ctx)); + if (ctx == NULL) { + return AEE_ENOMEMORY; + } + + // Use the context structure as a handle + *handle = (remote_handle64) ctx; + + // Enable FARF logs + HAP_setFARFRuntimeLoggingParams(0xffff, NULL, 0); + + // Set client class + { + HAP_power_request_t request; + memset(&request, 0, sizeof(HAP_power_request_t)); + request.type = HAP_power_set_apptype; + request.apptype = HAP_POWER_COMPUTE_CLIENT_CLASS; + + if ((err = HAP_power_set((void *) ctx, &request)) != 0) { + return err; + } + } + + { + HAP_power_request_t request; + memset(&request, 0, sizeof(request)); + + request.type = HAP_power_set_DCVS_v3; + request.dcvs_v3.set_dcvs_enable = TRUE; + request.dcvs_v3.dcvs_enable = TRUE; + request.dcvs_v3.dcvs_option = HAP_DCVS_V2_PERFORMANCE_MODE; + request.dcvs_v3.set_bus_params = TRUE; + request.dcvs_v3.bus_params.min_corner = HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.bus_params.max_corner = HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.bus_params.target_corner = HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.set_core_params = TRUE; + request.dcvs_v3.core_params.min_corner = 
HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.core_params.max_corner = HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.core_params.target_corner = HAP_DCVS_VCORNER_MAX; + request.dcvs_v3.set_sleep_disable = TRUE; + request.dcvs_v3.sleep_disable = TRUE; + if ((err = HAP_power_set((void *) ctx, &request)) != 0) { + return err; + } + + memset(&request, 0, sizeof(request)); + request.type = HAP_power_set_HVX; + request.hvx.power_up = TRUE; + if ((err = HAP_power_set((void *) ctx, &request)) != 0) { + return err; + } + } + + { + // Power on HMX + HAP_power_request_t request; + memset(&request, 0, sizeof(HAP_power_request_t)); + request.type = HAP_power_set_HMX; + request.hmx.power_up = TRUE; + FARF(ALWAYS, "Powering HMX on\n"); + err = HAP_power_set((void *) &ctx, &request); + if (err != AEE_SUCCESS) { + FARF(ERROR, "Error powering on HMX."); + return err; + } + } + + return AEE_SUCCESS; +} + +AEEResult htp_iface_close(remote_handle64 handle) { + struct htp_context * ctx = (struct htp_context *) handle; + + if (!ctx) { + return AEE_EBADPARM; + } + + if (ctx->queue) { + FARF(ERROR, "Closing handle with queue still open"); + return AEE_EITEMBUSY; + } + + free(ctx); + return AEE_SUCCESS; +} + +AEEResult htp_iface_enable_etm(remote_handle64 handle) { + int err = HAP_user_etm_enable(); + if (err) { + if (err == AEE_EVERSIONNOTSUPPORT) { + FARF(ERROR, "API HAP_user_etm_enable is not supported\n"); + } else { + FARF(ERROR, "Error executing HAP_user_etm_enable with error code : 0x%x\n", err); + } + } + return err; +} + +AEEResult htp_iface_disable_etm(remote_handle64 handle) { + int err = HAP_user_etm_disable(); + if (err) { + if (err == AEE_EVERSIONNOTSUPPORT) { + FARF(ERROR, "API HAP_user_etm_disable is not supported\n"); + } else { + FARF(ERROR, "Error executing HAP_user_etm_disable with error code : 0x%x\n", err); + } + } + return err; +} + +static int vtcm_acquire(struct htp_context * ctx) { + if (!ctx->vtcm_valid) { + // Temporarily bump thread priority to make sure it's higher than other sessions. + // This way the resource manager will notify the other thread to release VTCM. + // Note that we need to reaquire VTCM at normal priority for this to work next time. + qurt_thread_set_priority(qurt_thread_get_id(), ctx->thread_prio - 10); + HAP_compute_res_acquire_cached(ctx->vtcm_rctx, 1000000); + HAP_compute_res_release_cached(ctx->vtcm_rctx); + qurt_thread_set_priority(qurt_thread_get_id(), ctx->thread_prio); + + HAP_compute_res_acquire_cached(ctx->vtcm_rctx, 1000000); + ctx->vtcm_valid = true; + } + + ctx->vtcm_inuse = true; + return 0; +} + +static int vtcm_release(struct htp_context * ctx) { + ctx->vtcm_inuse = false; + + if (ctx->vtcm_valid && ctx->vtcm_needs_release) { + ctx->vtcm_valid = false; + ctx->vtcm_needs_release = false; + HAP_compute_res_release_cached(ctx->vtcm_rctx); + } + + return 0; +} + +static int vtcm_release_callback(unsigned int rctx, void * state) { + struct htp_context * ctx = (struct htp_context *) state; + + if (!ctx || ctx->vtcm_rctx != rctx) { + return AEE_EBADPARM; + } + + // If VTCM is not inuse (not processing Ops) release it right here + // otherwise we'll release it once we're done with the current Op. 
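+    // (This callback is registered from vtcm_alloc() via
+    // HAP_compute_res_attr_set_release_callback(); the compute resource manager
+    // invokes it when the cached VTCM is wanted back by another client.)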
+ + if (ctx->vtcm_inuse) { + ctx->vtcm_needs_release = false; + return 0; + } + + ctx->vtcm_valid = false; + HAP_compute_res_release_cached(ctx->vtcm_rctx); + + return 0; +} + +static int vtcm_alloc(struct htp_context * ctx) { + unsigned int vtcm_size = 8 * 1024 * 1024; // 8MB default + HAP_compute_res_query_VTCM(0, &vtcm_size, NULL, NULL, NULL); + + compute_res_attr_t attr; + HAP_compute_res_attr_init(&attr); + HAP_compute_res_attr_set_serialize(&attr, 0); + HAP_compute_res_attr_set_cache_mode(&attr, 1); + HAP_compute_res_attr_set_vtcm_param_v2(&attr, vtcm_size, vtcm_size, vtcm_size); + HAP_compute_res_attr_set_release_callback(&attr, vtcm_release_callback, (void *) ctx); + HAP_compute_res_attr_set_hmx_param(&attr, 1); + + // Allocate VTCM for scratch pads + uint32_t rctx = HAP_compute_res_acquire(&attr, 1000000 /* timeout */); + if (!rctx) { + FARF(ERROR, "failed to allocate %zu bytes VTCM\n", ctx->vtcm_size); + return AEE_ENOMEMORY; + } + + void * vtcm_ptr; + if (HAP_compute_res_attr_get_vtcm_ptr_v2(&attr, &vtcm_ptr, &vtcm_size) != 0) { + HAP_compute_res_release(rctx); + FARF(ERROR, "failed to allocate %zu bytes VTCM (new)\n", ctx->vtcm_size); + return AEE_ENOMEMORY; + } + + ctx->vtcm_base = (uint8_t *) vtcm_ptr; + ctx->vtcm_size = vtcm_size; + ctx->vtcm_rctx = rctx; + ctx->vtcm_valid = false; + ctx->vtcm_inuse = false; + ctx->vtcm_needs_release = false; + + return 0; +} + +static void vtcm_free(struct htp_context * ctx) { + if (ctx->vtcm_rctx) { + HAP_compute_res_release(ctx->vtcm_rctx); + ctx->vtcm_base = 0; + ctx->vtcm_rctx = 0; + } +} + +static void htp_packet_callback(dspqueue_t queue, int error, void * context); +static void htp_error_callback(dspqueue_t queue, int error, void * context); + +AEEResult htp_iface_start(remote_handle64 handle, uint32 sess_id, uint64 dsp_queue_id, uint32 n_hvx) { + struct htp_context * ctx = (struct htp_context *) handle; + + if (!ctx) { + return AEE_EBADPARM; + } + + if (ctx->queue) { + FARF(ERROR, "Queue already open"); + return AEE_EITEMBUSY; + } + + // Import queue created on the CPU + int err = dspqueue_import(dsp_queue_id, // Queue ID from dspqueue_export + htp_packet_callback, // Packet callback + htp_error_callback, // Error callback; no errors expected on the DSP + (void *) ctx, // Callback context + &ctx->queue); + + if (err) { + FARF(ERROR, "Queue import failed with 0x%08x", (unsigned) err); + return err; + } + + ctx->thread_id = qurt_thread_get_id(); + ctx->thread_prio = qurt_thread_get_priority(ctx->thread_id); + + // allocate VTCM + err = vtcm_alloc(ctx); + if (err != AEE_SUCCESS) { + FARF(ERROR, "Unable to allocate VTCM"); + return AEE_ENOMEMORY; + } + + qurt_sysenv_max_hthreads_t hw_threads; + qurt_sysenv_get_max_hw_threads(&hw_threads); + uint32_t hw_nhvx = (qurt_hvx_get_units() >> 8) & 0xFF; + + if (n_hvx == 0) { + n_hvx = hw_nhvx; + } + if (n_hvx > hw_threads.max_hthreads) { + n_hvx = hw_threads.max_hthreads; + } + if (n_hvx > HTP_MAX_NTHREADS) { + n_hvx = HTP_MAX_NTHREADS; + } + + ctx->n_threads = n_hvx; + for (int i = 0; i < ctx->n_threads; i++) { + ctx->dma[i] = dma_queue_create(HTP_SPAD_SRC0_NROWS * 2); + } + + // init worker pool + err = worker_pool_init(&ctx->worker_pool, n_hvx); + if (err != AEE_SUCCESS) { + FARF(ERROR, "Unable to create worker pool"); + return err; + } + + FARF(HIGH, "session %u started: n-hvx %u vtcm-size %zu vtcm-rctx %u n-threads %u thread-id %d thread-prio %d \n", + sess_id, hw_nhvx, ctx->vtcm_size, ctx->vtcm_rctx, ctx->n_threads, ctx->thread_id, ctx->thread_prio); + + return AEE_SUCCESS; +} + +AEEResult 
htp_iface_stop(remote_handle64 handle) { + struct htp_context * ctx = (struct htp_context *) handle; + if (!ctx) { + return AEE_EBADPARM; + } + + if (!ctx->queue) { + FARF(ERROR, "Queue not open"); + return AEE_EBADSTATE; + } + + // Close queue. dspqueue_close() will also wait for callbacks to finish. + int err = dspqueue_close(ctx->queue); + ctx->queue = NULL; + if (err != 0) { + FARF(ERROR, "Queue close failed with 0x%08x", (unsigned) err); + return err; + } + + if (ctx->worker_pool) { + // Release worker pool + worker_pool_release(&ctx->worker_pool); + } + + for (int i = 0; i < ctx->n_threads; i++) { + dma_queue_delete(ctx->dma[i]); + } + + vtcm_free(ctx); + + return AEE_SUCCESS; +} + +static void htp_error_callback(dspqueue_t queue, int error, void * context) { + // No errors expected on the DSP. + FARF(ERROR, "Error callback: 0x%08x", (unsigned) error); +} + +struct profile_data { + uint64_t usecs; + uint64_t cycles; + uint64_t pkts; +}; + +static inline void profile_start(struct profile_data * d) { + d->usecs = HAP_perf_get_qtimer_count(); + d->cycles = htp_get_cycles(); + d->pkts = htp_get_pktcnt(); +} + +static inline void profile_stop(struct profile_data * d) { + d->usecs = HAP_perf_qtimer_count_to_us(HAP_perf_get_qtimer_count() - d->usecs); + d->cycles = htp_get_cycles() - d->cycles; + d->pkts = htp_get_pktcnt() - d->pkts; +} + +static int send_htp_rsp(struct htp_context * c, + uint32_t op, + uint32_t status, + struct dspqueue_buffer * bufs, + size_t n_bufs, + struct profile_data * prof) { + // Prep response struct + struct htp_general_rsp rsp; + rsp.op = op; + rsp.status = status; + rsp.prof_usecs = prof->usecs; + rsp.prof_cycles = prof->cycles; + rsp.prof_pkts = prof->pkts; + + int err = dspqueue_write(c->queue, + 0, // Flags + n_bufs, + bufs, // Buffer references + sizeof(rsp), + (const uint8_t *) &rsp, // Message + DSPQUEUE_TIMEOUT_NONE); + + if (err != 0) { + FARF(ERROR, "dspqueue_write failed: 0x%08x", (unsigned) err); + } + + return err; +} + +static void proc_matmul_req(struct htp_context * ctx, + struct htp_general_req * req, + struct dspqueue_buffer * bufs, + size_t n_bufs) { + // Prep response buffer structs (needed for error responses, etc) + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[2].fd = bufs[2].fd; + rsp_bufs[2].ptr = bufs[2].ptr; + rsp_bufs[2].size = bufs[2].size; + rsp_bufs[2].offset = bufs[2].offset; + rsp_bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.src1 = req->src1; + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.src1.data = (uint32_t) bufs[1].ptr; + octx.dst.data = (uint32_t) bufs[2].ptr; + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + 
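+    // Profile the op end-to-end (qtimer usecs, core cycles, thread packets);
+    // the totals are returned to the host in the response via send_htp_rsp().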
profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_matmul(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 3, &prof); +} + +static void proc_matmul_id_req(struct htp_context * ctx, + struct htp_general_req * req, + struct dspqueue_buffer * bufs, + size_t n_bufs) { + // Prep response buffer structs (needed for error responses, etc) + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[2].fd = bufs[2].fd; + rsp_bufs[2].ptr = bufs[2].ptr; + rsp_bufs[2].size = bufs[2].size; + rsp_bufs[2].offset = bufs[2].offset; + rsp_bufs[2].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[3].fd = bufs[3].fd; + rsp_bufs[3].ptr = bufs[3].ptr; + rsp_bufs[3].size = bufs[3].size; + rsp_bufs[3].offset = bufs[3].offset; + rsp_bufs[3].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.src1 = req->src1; + octx.src2 = req->src2; + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.src1.data = (uint32_t) bufs[1].ptr; + octx.src2.data = (uint32_t) bufs[2].ptr; + octx.dst.data = (uint32_t) bufs[3].ptr; + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_matmul_id(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 4, &prof); +} + +static void proc_binary_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[2].fd = bufs[2].fd; + rsp_bufs[2].ptr = bufs[2].ptr; + rsp_bufs[2].offset = bufs[2].offset; + rsp_bufs[2].size = bufs[2].size; + rsp_bufs[2].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.src1 = req->src1; + octx.dst = req->dst; 
+ octx.flags = req->flags; + octx.op = req->op; + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.src1.data = (uint32_t) bufs[1].ptr; + octx.dst.data = (uint32_t) bufs[2].ptr; + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_binary(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 3, &prof); +} + +static void proc_add_id_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[2].fd = bufs[2].fd; + rsp_bufs[2].ptr = bufs[2].ptr; + rsp_bufs[2].offset = bufs[2].offset; + rsp_bufs[2].size = bufs[2].size; + rsp_bufs[2].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[3].fd = bufs[3].fd; + rsp_bufs[3].ptr = bufs[3].ptr; + rsp_bufs[3].offset = bufs[3].offset; + rsp_bufs[3].size = bufs[3].size; + rsp_bufs[3].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.src1 = req->src1; + octx.src2 = req->src2; + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.src1.data = (uint32_t) bufs[1].ptr; + octx.src2.data = (uint32_t) bufs[2].ptr; + octx.dst.data = (uint32_t) bufs[3].ptr; + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_binary(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 4, &prof); +} + +static void proc_unary_req(struct htp_context * ctx, struct htp_general_req * req, struct dspqueue_buffer * bufs) { + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + 
memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.dst.data = (uint32_t) bufs[1].ptr; + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_unary(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, 2, &prof); +} + +static void proc_activations_req(struct htp_context * ctx, + struct htp_general_req * req, + struct dspqueue_buffer * bufs, + uint32_t n_bufs) { + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + int write_idx = 1; + if (3 == n_bufs) { + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + write_idx = 2; + } + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[write_idx].fd = bufs[write_idx].fd; + rsp_bufs[write_idx].ptr = bufs[write_idx].ptr; + rsp_bufs[write_idx].offset = bufs[write_idx].offset; + rsp_bufs[write_idx].size = bufs[write_idx].size; + rsp_bufs[write_idx].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + if (3 == n_bufs) { + octx.src1 = req->src1; + } + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + if (3 == n_bufs) { + octx.src1.data = (uint32_t) bufs[1].ptr; + octx.dst.data = (uint32_t) bufs[2].ptr; + } else { + octx.dst.data = (uint32_t) bufs[1].ptr; + } + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + if (octx.op == HTP_OP_SOFTMAX) { + rsp_status = op_softmax(&octx); + } else { + rsp_status = op_activations(&octx); + } + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, n_bufs, &prof); +} + +static void proc_rope_req(struct htp_context * ctx, + struct htp_general_req * req, + struct dspqueue_buffer * bufs, + uint32_t n_bufs) { + struct dspqueue_buffer rsp_bufs[HTP_MAX_PACKET_BUFFERS]; + memset(rsp_bufs, 0, sizeof(rsp_bufs)); + + rsp_bufs[0].fd = bufs[0].fd; + rsp_bufs[0].ptr = bufs[0].ptr; + rsp_bufs[0].offset = bufs[0].offset; + rsp_bufs[0].size = bufs[0].size; + rsp_bufs[0].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + rsp_bufs[1].fd = bufs[1].fd; + rsp_bufs[1].ptr = bufs[1].ptr; + rsp_bufs[1].offset = bufs[1].offset; + rsp_bufs[1].size = bufs[1].size; + rsp_bufs[1].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + int write_idx = 2; + if (4 == n_bufs) { + rsp_bufs[write_idx].fd = bufs[write_idx].fd; + rsp_bufs[write_idx].ptr = bufs[write_idx].ptr; + rsp_bufs[write_idx].offset = bufs[write_idx].offset; + rsp_bufs[write_idx].size = 
bufs[write_idx].size; + rsp_bufs[write_idx].flags = DSPQUEUE_BUFFER_FLAG_DEREF; // Release reference + + write_idx++; + } + + // We had written to the output buffer, we'd also need to flush it + rsp_bufs[write_idx].fd = bufs[write_idx].fd; + rsp_bufs[write_idx].ptr = bufs[write_idx].ptr; + rsp_bufs[write_idx].offset = bufs[write_idx].offset; + rsp_bufs[write_idx].size = bufs[write_idx].size; + rsp_bufs[write_idx].flags = (DSPQUEUE_BUFFER_FLAG_DEREF | // Release reference + DSPQUEUE_BUFFER_FLAG_FLUSH_SENDER | // Flush NSP + DSPQUEUE_BUFFER_FLAG_INVALIDATE_RECIPIENT); // Invalidate CPU + + // Setup Op context + struct htp_ops_context octx = { 0 }; + octx.ctx = ctx; + octx.src0 = req->src0; + octx.src1 = req->src1; + if (4 == n_bufs) { + octx.src2 = req->src2; + } + octx.dst = req->dst; + octx.flags = req->flags; + octx.op = req->op; + + memcpy(octx.op_params, req->op_params, sizeof(octx.op_params)); + + // Update data pointers + octx.src0.data = (uint32_t) bufs[0].ptr; + octx.src1.data = (uint32_t) bufs[1].ptr; + if (4 == n_bufs) { + octx.src2.data = (uint32_t) bufs[2].ptr; + octx.dst.data = (uint32_t) bufs[3].ptr; + } else { + octx.dst.data = (uint32_t) bufs[2].ptr; + } + octx.n_threads = ctx->n_threads; + + struct profile_data prof; + profile_start(&prof); + + uint32_t rsp_status = HTP_STATUS_INTERNAL_ERR; + if (vtcm_acquire(ctx) == AEE_SUCCESS) { + rsp_status = op_rope(&octx); + vtcm_release(ctx); + } + + profile_stop(&prof); + send_htp_rsp(ctx, req->op, rsp_status, rsp_bufs, n_bufs, &prof); +} + +static void htp_packet_callback(dspqueue_t queue, int error, void * context) { + struct htp_context * ctx = (struct htp_context *) context; + + // Repeatedly read packets from the queue until it's empty. We don't + // necessarily get a separate callback for each packet, and new packets + // may arrive while we're processing the previous one. This ensures we + // keep the DSP busy as much as possible and avoid waiting for the CPU. 
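+    // Each packet carries the op descriptor as the message payload and the tensor
+    // data as buffer references; the switch below checks that the buffer count
+    // matches what the op expects (e.g. src0/src1/dst for a mul-mat) before
+    // dispatching to the matching proc_*_req() handler.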
+ + while (1) { + struct htp_general_req req; + uint32_t req_size; + + struct dspqueue_buffer bufs[HTP_MAX_PACKET_BUFFERS]; + uint32_t n_bufs; + uint32_t flags; + + // Read packet from queue + int err = dspqueue_read_noblock(queue, &flags, + HTP_MAX_PACKET_BUFFERS, // Maximum number of buffer references + &n_bufs, // Number of buffer references + bufs, // Buffer references + sizeof(req), // Max message length + &req_size, // Message length + (uint8_t *) &req); // Message + + if (err == AEE_EWOULDBLOCK) { + // Consumed all packets available for now + return; + } + + if (err != 0) { + FARF(ERROR, "dspqueue_read_noblock failed: 0x%08x", (unsigned) err); + return; + } + + if (req_size != sizeof(req)) { + FARF(ERROR, "Invalid request size"); + continue; + } + + if (req.flags & HTP_OPFLAGS_EARLY_WAKEUP) { + // Host wants early notification + dspqueue_write_early_wakeup_noblock(ctx->queue, 10, 0); + } + + // Process packet based on its message type + switch (req.op) { + case HTP_OP_MUL_MAT: + if (n_bufs != 3) { + FARF(ERROR, "Bad matmul-req buffer list"); + continue; + } + proc_matmul_req(ctx, &req, bufs, n_bufs); + break; + + case HTP_OP_MUL_MAT_ID: + if (n_bufs != 4) { + FARF(ERROR, "Bad matmul-id-req buffer list"); + continue; + } + proc_matmul_id_req(ctx, &req, bufs, n_bufs); + break; + + case HTP_OP_MUL: + case HTP_OP_ADD: + case HTP_OP_SUB: + if (n_bufs != 3) { + FARF(ERROR, "Bad binary-req buffer list"); + continue; + } + proc_binary_req(ctx, &req, bufs); + break; + + case HTP_OP_RMS_NORM: + if (n_bufs != 2) { + FARF(ERROR, "Bad unary-req buffer list"); + continue; + } + + proc_unary_req(ctx, &req, bufs); + break; + + case HTP_OP_UNARY_SILU: + if (n_bufs != 2) { + FARF(ERROR, "Bad act-req buffer list"); + continue; + } + proc_activations_req(ctx, &req, bufs, n_bufs); + break; + + case HTP_OP_GLU_SWIGLU: + case HTP_OP_SOFTMAX: + if ((n_bufs != 2) && (n_bufs != 3)) { + FARF(ERROR, "Bad act-req buffer list"); + continue; + } + proc_activations_req(ctx, &req, bufs, n_bufs); + break; + + case HTP_OP_ADD_ID: + if (n_bufs != 4) { + FARF(ERROR, "Bad add-id-req buffer list"); + continue; + } + proc_add_id_req(ctx, &req, bufs); + break; + + case HTP_OP_ROPE: + if ((n_bufs != 3) && (n_bufs != 4)) { + FARF(ERROR, "Bad rope-req buffer list"); + continue; + } + proc_rope_req(ctx, &req, bufs, n_bufs); + break; + + default: + FARF(ERROR, "Unknown Op %u", req.op); + break; + } + } +} diff --git a/ggml/src/ggml-hexagon/htp/matmul-ops.c b/ggml/src/ggml-hexagon/htp/matmul-ops.c new file mode 100644 index 0000000000000..c99b6a0d18efb --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/matmul-ops.c @@ -0,0 +1,2223 @@ +#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +struct htp_matmul_type { + const char * type; + void (*vec_dot)(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); + void (*vec_dot_rx2)(const int n, + float * restrict s, + const void * restrict vx, + uint32_t vx_row_size, + const void * restrict vy); +}; + +typedef struct { + HVX_Vector v[2]; +} 
HVX_Vector_x2; + +typedef struct { + HVX_Vector v[4]; +} HVX_Vector_x4; + +typedef struct { + HVX_Vector v[8]; +} HVX_Vector_x8; + +// vdelta control to replicate first 4x fp32 values across lanes +static const uint8_t __attribute__((aligned(128))) repl_4x_fp32[128] = { + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, + 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, + 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, + 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, + 0x44, 0x44, 0x44, 0x44, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, + 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, +}; + +// vdelta control to replicate and interleave first 8x fp32 values across lanes +static const uint8_t __attribute__((aligned(128))) repl_interleave_8x_fp32[128] = { + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, + 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, + 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, + 0x44, 0x44, 0x44, 0x44, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x44, 0x44, 0x44, + 0x44, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, + 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, +}; + +// vdelta control to replicate first fp32 value across all elements +static const uint8_t __attribute__((aligned(128))) repl_1x_fp32[128] = { + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, + 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, + 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, + 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x40, 0x40, 0x40, 0x40, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, + 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, + 0x04, 0x20, 0x20, 0x20, 0x20, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x10, 0x10, + 0x10, 0x10, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, +}; + +// vdelta control to replicate first fp16 value across all elements +static const uint8_t __attribute__((aligned(128))) repl_1x_fp16[128] = { + 0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, + 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, + 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, + 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x40, 0x40, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, + 
0x04, 0x04, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, + 0x02, 0x20, 0x20, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x10, 0x10, + 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, 0x08, 0x08, 0x02, 0x02, 0x04, 0x04, 0x02, 0x02, +}; + +// vdelta control to expand first 32 e8m0 values into 32 uint32 elements +static const uint8_t __attribute__((aligned(128))) expand_x32_e8m0[128] = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00, 0x00, + 0x00, 0x11, 0x10, 0x10, 0x10, 0x02, 0x00, 0x04, 0x00, 0x01, 0x02, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04, + 0x00, 0x00, 0x22, 0x20, 0x20, 0x20, 0x21, 0x22, 0x20, 0x24, 0x04, 0x00, 0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x02, + 0x00, 0x04, 0x00, 0x11, 0x12, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08, + 0x01, 0x02, 0x00, 0x04, 0x44, 0x40, 0x40, 0x40, 0x41, 0x40, 0x40, 0x40, 0x42, 0x40, 0x44, 0x40, 0x41, 0x42, 0x48, + 0x48, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x12, 0x10, 0x10, 0x10, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x22, 0x20, 0x24, 0x20, 0x21, 0x22, 0x20, 0x20, +}; + +static const uint8_t __attribute__((aligned(VLEN))) kvalues_mxfp4_lut[] = { + 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 6, 0, 8, 0, 12, 0, 0, 0, 0xff, 0, 0xfe, 0, 0xfd, 0, 0xfc, 0, + 0xfa, 0, 0xf8, 0, 0xf4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +// q4x4x2 and q8x4x2 are the flat q4/8_0 formats where all quants are stored first followed by all scales + +static inline size_t q8x4x2_row_size(uint32_t ne) { + // ensures perfect alignment of quants and full row + const uint32_t qk = QK_Q8_0x4x2; + const uint32_t nb = (ne + qk - 1) / qk; + return htp_round_up(ne + nb * 8 * sizeof(__fp16), 128); +} + +static inline HVX_Vector_x8 hvx_vec_load_q4x4x8(const uint8_t * restrict ptr) { + const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; + + HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes) + HVX_Vector v2_3 = vptr[1]; // ... + HVX_Vector v4_5 = vptr[2]; // ... + HVX_Vector v6_7 = vptr[3]; // ... + + const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F); + + HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4); // & 0x0F + HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4); // >> 4 + HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4); // & 0x0F + HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4); // >> 4 + HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4); // & 0x0F + HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4); // >> 4 + HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4); // & 0x0F + HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4); // >> 4 + + // Convert uint4 to int4 (i.e. 
x - 8) + const HVX_Vector i8 = Q6_Vb_vsplat_R(8); + v0 = Q6_Vb_vsub_VbVb(v0, i8); + v1 = Q6_Vb_vsub_VbVb(v1, i8); + v2 = Q6_Vb_vsub_VbVb(v2, i8); + v3 = Q6_Vb_vsub_VbVb(v3, i8); + v4 = Q6_Vb_vsub_VbVb(v4, i8); + v5 = Q6_Vb_vsub_VbVb(v5, i8); + v6 = Q6_Vb_vsub_VbVb(v6, i8); + v7 = Q6_Vb_vsub_VbVb(v7, i8); + + HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; + return r; +} + +static inline HVX_Vector_x8 hvx_vec_load_mxfp4x4x8(const uint8_t * restrict ptr) { + const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; + + HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes) + HVX_Vector v2_3 = vptr[1]; // ... + HVX_Vector v4_5 = vptr[2]; // ... + HVX_Vector v6_7 = vptr[3]; // ... + + const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F); + + HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4); // & 0x0F + HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4); // >> 4 + HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4); // & 0x0F + HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4); // >> 4 + HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4); // & 0x0F + HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4); // >> 4 + HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4); // & 0x0F + HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4); // >> 4 + + HVX_Vector lut = *(const HVX_Vector *) kvalues_mxfp4_lut; + v0 = Q6_Vb_vlut32_VbVbI(v0, lut, 0); + v1 = Q6_Vb_vlut32_VbVbI(v1, lut, 0); + v2 = Q6_Vb_vlut32_VbVbI(v2, lut, 0); + v3 = Q6_Vb_vlut32_VbVbI(v3, lut, 0); + v4 = Q6_Vb_vlut32_VbVbI(v4, lut, 0); + v5 = Q6_Vb_vlut32_VbVbI(v5, lut, 0); + v6 = Q6_Vb_vlut32_VbVbI(v6, lut, 0); + v7 = Q6_Vb_vlut32_VbVbI(v7, lut, 0); + + HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; + return r; +} + +static inline HVX_Vector_x8 hvx_vec_load_q8x4x8(const uint8_t * restrict ptr) { + const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; + + HVX_Vector v0 = vptr[0]; // first 128 vals + HVX_Vector v1 = vptr[1]; // ... + HVX_Vector v2 = vptr[2]; // ... + HVX_Vector v3 = vptr[3]; // ... + HVX_Vector v4 = vptr[4]; // ... + HVX_Vector v5 = vptr[5]; // ... + HVX_Vector v6 = vptr[6]; // ... + HVX_Vector v7 = vptr[7]; // ... 
+ + HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 }; + return r; +} + +static inline HVX_Vector_x4 hvx_vec_load_x4_f16(const uint8_t * restrict ptr) { + const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr; + + HVX_Vector v0 = vptr[0]; // first 64 vals + HVX_Vector v1 = vptr[1]; // second 64 vals + HVX_Vector v2 = vptr[2]; // third 64 vals + HVX_Vector v3 = vptr[3]; // forth 64 vals + + HVX_Vector_x4 r = { v0, v1, v2, v3 }; + return r; +} + +static inline HVX_Vector_x4 hvx_vec_load_x4_f32_as_f16(const uint8_t * restrict ptr) { + const HVX_VectorPair * restrict vptr = (const HVX_VectorPair *) ptr; + + HVX_VectorPair v0 = vptr[0]; // first 64 vals + HVX_VectorPair v1 = vptr[1]; // second 64 vals + HVX_VectorPair v2 = vptr[2]; // third 64 vals + HVX_VectorPair v3 = vptr[3]; // forth 64 vals + + HVX_Vector vq0_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v0), Q6_V_vzero()); + HVX_Vector vq0_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v0), Q6_V_vzero()); + HVX_Vector vq1_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v1), Q6_V_vzero()); + HVX_Vector vq1_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v1), Q6_V_vzero()); + HVX_Vector vq2_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v2), Q6_V_vzero()); + HVX_Vector vq2_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v2), Q6_V_vzero()); + HVX_Vector vq3_lo = Q6_Vqf32_vsub_VsfVsf(Q6_V_lo_W(v3), Q6_V_vzero()); + HVX_Vector vq3_hi = Q6_Vqf32_vsub_VsfVsf(Q6_V_hi_W(v3), Q6_V_vzero()); + + HVX_Vector vh0 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq0_hi, vq0_lo)); + HVX_Vector vh1 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq1_hi, vq1_lo)); + HVX_Vector vh2 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq2_hi, vq2_lo)); + HVX_Vector vh3 = Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vq3_hi, vq3_lo)); + + // vcombine does a shuffle, use vdeal to undo + + HVX_Vector_x4 r = { Q6_Vh_vdeal_Vh(vh0), Q6_Vh_vdeal_Vh(vh1), Q6_Vh_vdeal_Vh(vh2), Q6_Vh_vdeal_Vh(vh3) }; + return r; +} + +// Reduce multiply 1024 x 1024 int8 elements (32x q4/8 blocks in 8x HVX vectors). +// Accumulate each block into a single int32 value. +// Return a single HVX vector with 32x int32 accumulators. +// This version is parameterized to support less than 1024 elements. +// if() checks are optimized out at compile time -- make sure to pass N as a constexpr. 
+ +static inline HVX_Vector hvx_vec_rmpy_x8_n(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) { + HVX_Vector r0 = Q6_V_vsplat_R(0); + HVX_Vector r1 = Q6_V_vsplat_R(0); + HVX_Vector r2 = Q6_V_vsplat_R(0); + HVX_Vector r3 = Q6_V_vsplat_R(0); + HVX_Vector r4 = Q6_V_vsplat_R(0); + HVX_Vector r5 = Q6_V_vsplat_R(0); + HVX_Vector r6 = Q6_V_vsplat_R(0); + HVX_Vector r7 = Q6_V_vsplat_R(0); + + HVX_VectorPair p3; + HVX_VectorPair p2; + HVX_VectorPair p1; + HVX_VectorPair p0; + + if (n >= 128) { r0 = Q6_Vw_vrmpy_VbVb(x.v[0], y.v[0]); } + if (n >= 256) { r1 = Q6_Vw_vrmpy_VbVb(x.v[1], y.v[1]); } + if (n >= 384) { r2 = Q6_Vw_vrmpy_VbVb(x.v[2], y.v[2]); } + if (n >= 512) { r3 = Q6_Vw_vrmpy_VbVb(x.v[3], y.v[3]); } + if (n >= 640) { r4 = Q6_Vw_vrmpy_VbVb(x.v[4], y.v[4]); } + if (n >= 768) { r5 = Q6_Vw_vrmpy_VbVb(x.v[5], y.v[5]); } + if (n >= 896) { r6 = Q6_Vw_vrmpy_VbVb(x.v[6], y.v[6]); } + if (n >= 1024) { r7 = Q6_Vw_vrmpy_VbVb(x.v[7], y.v[7]); } + + if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } + if (n >= 384) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); } + if (n >= 640) { p2 = Q6_W_vdeal_VVR(r5, r4, -4); } + if (n >= 896) { p3 = Q6_W_vdeal_VVR(r7, r6, -4); } + + if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } + if (n >= 384) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); } + if (n >= 640) { r2 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p2), Q6_V_hi_W(p2)); } + if (n >= 896) { r3 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p3), Q6_V_hi_W(p3)); } + + if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } + if (n >= 640) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); } + + if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } + if (n >= 640) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); } + + if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); } + if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); } + + return r0; +} + +static inline HVX_Vector hvx_vec_rmpy_x8_full(HVX_Vector_x8 x, HVX_Vector_x8 y) { + return hvx_vec_rmpy_x8_n(x, y, 1024); +} + +// Handle most common cases of tensors not multiple of 1024. +static inline HVX_Vector hvx_vec_rmpy_x8_nloe(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) { + if (n <= 256) { return hvx_vec_rmpy_x8_n(x, y, 256); }; + if (n <= 512) { return hvx_vec_rmpy_x8_n(x, y, 512); }; + if (n <= 768) { return hvx_vec_rmpy_x8_n(x, y, 768); }; + return hvx_vec_rmpy_x8_n(x, y, 1024); +} + +static void vec_dot_q4x4x2_q8x4x2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_Q4_0x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t x_qblk_size = qk / 2; // int4 + const uint32_t x_qrow_size = n / 2; // int4 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales + + const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). 
+ + const uint32_t nb = n / qk; // num full blocks + const uint32_t nloe = n % qk; // num leftover elemements + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + + // Zero out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Reduce and convert into fp32 + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + + hvx_vec_store_u(&s[0], 4, r0_sum); +} + +static void vec_dot_q4x4x2_q8x4x2_rx2(const int n, + float * restrict s, + const void * restrict vx, + uint32_t vx_row_size, + const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_Q4_0x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t x_qblk_size = qk / 2; // int4 + const uint32_t x_qrow_size = n / 2; // int4 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + HVX_Vector r1_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). 
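    // The _rx2 variant runs two src0 rows (r0, r1) against the same src1
    // activations (resident in VTCM in the callers) in a single pass, so the
    // src1 quants and scales are loaded once per row pair. The two row sums
    // are interleaved with vshuff at the end and stored as s[0] and s[1].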
+ + const uint32_t nb = n / qk; // num full blocks + const uint32_t nloe = n % qk; // num leftover elemements + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r1_q, vy_q, nloe)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); + + // Zero out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + r1_dd = Q6_V_vand_QV(bmask, r1_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Convert into fp32 and reduce + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); + HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); + + hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); +} + +static void vec_dot_q8x4x2_q8x4x2(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_Q4_0x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t x_qblk_size = qk; // int8 + const uint32_t x_qrow_size = n; // int8 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales + + const uint8_t * restrict y_q = 
((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). + + const uint32_t nb = n / qk; // num full blocks + int32_t nloe = n % qk; // num leftover elemements (must be signed) + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + + // Zero out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Reduce and convert into fp32 + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + + hvx_vec_store_u(&s[0], 4, r0_sum); +} + +static void vec_dot_q8x4x2_q8x4x2_rx2(const int n, + float * restrict s, + const void * restrict vx, + uint32_t vx_row_size, + const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_Q4_0x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t x_qblk_size = qk; // int8 + const uint32_t x_qrow_size = n; // int8 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + HVX_Vector r1_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). 
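    // Leftover handling (below) still loads a full superblock but zeroes the
    // unused combined scales: Q6_Q_vsetq_R(nloe / 8) covers nloe / 8 bytes,
    // i.e. nloe / 32 fp32 lanes -- one combined scale per remaining
    // 32-element sub-block -- so the padded quants contribute nothing.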
+ + const uint32_t nb = n / qk; // num full blocks + int32_t nloe = n % qk; // num leftover elemements (must be signed) + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Process leftovers, we still load full 4x4x2 block but zero out unused scales/blocks + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r0_q, vy_q, nloe)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_nloe(r1_q, vy_q, nloe)); + + HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size)); + HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size)); + HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size)); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d))); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d))); + + // Zero out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + r1_dd = Q6_V_vand_QV(bmask, r1_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Convert into fp32 and reduce + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); + HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); + + hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); +} + +static void vec_dot_mxfp4x4x2_q8x4x2(const int n, + float * restrict s, + const void * restrict vx, + const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_MXFP4x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0 + const uint32_t x_qblk_size = qk / 2; // fp4 + const uint32_t x_qrow_size = n / 2; // fp4 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) vx + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) vx + x_qrow_size); // then scales + + const 
uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). + + const uint32_t nb = n / qk; // num full blocks + int32_t nloe = n % qk; // num leftover elemements (must be signed) + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + + HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); + HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); + + // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving + HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 + vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); + vy_d = Q6_Vsf_equals_Vqf32(vy_d); + + // Convert rX_d scales from e8m0 to fp32 + // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... + // Left shift with zero fill to create FP32 + // FIXME: might need to handle zero as a special case (see ggml-cpu code) + HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; + HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); + r0_d = Q6_V_vdelta_VV(r0_d, expand); + r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); + r0_d = Q6_Vw_vasl_VwR(r0_d, 23); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Process leftovers + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + + HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); + HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); + + // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving + HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 + vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); + vy_d = Q6_Vsf_equals_Vqf32(vy_d); + + // Convert rX_d scales from e8m0 to fp32 + // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... 
+ // Left shift with zero fill to create FP32 + // FIXME: might need to handle zero as a special case (see ggml-cpu code) + HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; + HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); + r0_d = Q6_V_vdelta_VV(r0_d, expand); + r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); + r0_d = Q6_Vw_vasl_VwR(r0_d, 23); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); + + // Zero-out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + } + + // Reduce and convert into fp32 + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + + hvx_vec_store_u(&s[0], 4, r0_sum); +} + +static void vec_dot_mxfp4x4x2_q8x4x2_rx2(const int n, + float * restrict s, + const void * restrict vx, + uint32_t vx_row_size, + const void * restrict vy) { + assert(n % 32 == 0); // min sub-block size + assert((unsigned long) vx % 128 == 0); + assert((unsigned long) vy % 128 == 0); + + const uint32_t qk = QK_MXFP4x4x2 * 4; + + const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0 + const uint32_t x_qblk_size = qk / 2; // fp4 + const uint32_t x_qrow_size = n / 2; // fp4 (not padded) + + const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16 + const uint32_t y_qblk_size = qk; // int8 + const uint32_t y_qrow_size = n; // int8 (not padded) + + const uint8_t * restrict r0_x_q = ((const uint8_t *) (vx + (0 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r0_x_d = ((const uint8_t *) (vx + (0 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict r1_x_q = ((const uint8_t *) (vx + (1 * vx_row_size)) + 0); // quants first + const uint8_t * restrict r1_x_d = ((const uint8_t *) (vx + (1 * vx_row_size)) + x_qrow_size); // then scales + + const uint8_t * restrict y_q = ((const uint8_t *) vy + 0); // quants first + const uint8_t * restrict y_d = ((const uint8_t *) vy + y_qrow_size); // then scales + + // Row sum (qf32) + HVX_Vector r0_sum = Q6_V_vsplat_R(0); + HVX_Vector r1_sum = Q6_V_vsplat_R(0); + + // Multiply and accumulate into int32. + // Compute combined scale (fp32). + // Apply scale to acc and accumulate into the row sum (qf32). + + const uint32_t nb = n / qk; // num full blocks + int32_t nloe = n % qk; // num leftover elemements (must be signed) + + uint32_t i = 0; + for (; i < nb; i++) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); + + HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); + HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); + HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size); + + // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving + HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 + vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); + vy_d = Q6_Vsf_equals_Vqf32(vy_d); + + // Convert rX_d scales from e8m0 to fp32 + // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... 
+ // Left shift with zero fill to create FP32 + // FIXME: might need to handle zero as a special case (see ggml-cpu code) + HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; + HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); + r0_d = Q6_V_vdelta_VV(r0_d, expand); + r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); + r0_d = Q6_Vw_vasl_VwR(r0_d, 23); + r1_d = Q6_V_vdelta_VV(r1_d, expand); + r1_d = Q6_V_vand_VV(r1_d, e8m0_mask); + r1_d = Q6_Vw_vasl_VwR(r1_d, 23); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d)); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Process leftovers + if (nloe) { + HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8(y_q + i * y_qblk_size); + HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8(r0_x_q + i * x_qblk_size); + HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8(r1_x_q + i * x_qblk_size); + + HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q)); + HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q)); + + HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size); + HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size); + HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size); + + // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving + HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16 + vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half)); + vy_d = Q6_Vsf_equals_Vqf32(vy_d); + + // Convert rX_d scales from e8m0 to fp32 + // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ... 
+ // Left shift with zero fill to create FP32 + // FIXME: might need to handle zero as a special case (see ggml-cpu code) + HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0; + HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff); + r0_d = Q6_V_vdelta_VV(r0_d, expand); + r0_d = Q6_V_vand_VV(r0_d, e8m0_mask); + r0_d = Q6_Vw_vasl_VwR(r0_d, 23); + r1_d = Q6_V_vdelta_VV(r1_d, expand); + r1_d = Q6_V_vand_VV(r1_d, e8m0_mask); + r1_d = Q6_Vw_vasl_VwR(r1_d, 23); + + HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d)); + HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d)); + + // Zero-out unused scales + HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8); + r0_dd = Q6_V_vand_QV(bmask, r0_dd); + r1_dd = Q6_V_vand_QV(bmask, r1_dd); + + HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd); + HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd); + + r0_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r0_sum, r0_fa); + r1_sum = Q6_Vqf32_vadd_Vqf32Vqf32(r1_sum, r1_fa); + } + + // Convert into fp32 and reduce + r0_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r0_sum)); + r1_sum = hvx_vec_fp32_reduce_sum(Q6_Vsf_equals_Vqf32(r1_sum)); + HVX_VectorPair p0 = Q6_W_vshuff_VVR(r1_sum, r0_sum, 4); + + hvx_vec_store_u(&s[0], 8, Q6_V_lo_W(p0)); +} + +#if 1 +static void vec_dot_f16_f32(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + if (0) { + float rsum = 0; + const __fp16 * restrict vx = (const __fp16 * restrict) x; + const float * restrict vy = (const float * restrict) y; + + for (uint32_t i = 0; i < n; i++) { + rsum += vx[i] * (__fp16) vy[i]; + } + *s = rsum; + return; + } + + const HVX_UVector * restrict vx = (const HVX_UVector * restrict) x; + const HVX_UVectorPair * restrict vy = (const HVX_UVectorPair * restrict) y; + + uint32_t nv0 = n / 64; // num full fp16 hvx vectors + uint32_t nv1 = n % 64; // leftover elements + + // for some reason we need volatile here so that the compiler doesn't try anything funky + volatile HVX_Vector rsum = Q6_V_vsplat_R(0); + + uint32_t i = 0; + + for (i = 0; i < nv0; i++) { + HVX_VectorPair yp = vy[i]; + + HVX_Vector x = vx[i]; + HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0 + + HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp)); + HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp)); + + HVX_Vector sum = Q6_Vqf32_vadd_Vqf32Vqf32(hi, lo); + rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum); + } + + if (nv1) { + HVX_VectorPair yp = vy[i]; + + HVX_Vector x = vx[i]; + HVX_VectorPair xp = Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(x), Q6_Vh_vsplat_R(0x3C00)); // mul by 1.0 + + if (nv1 >= 32) { + HVX_Vector hi = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_hi_W(xp)), Q6_V_hi_W(yp)); + rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, hi); + nv1 -= 32; + } + + rsum = hvx_vec_qf32_reduce_sum(rsum); + + if (nv1) { + HVX_Vector lo = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(Q6_V_lo_W(xp)), Q6_V_lo_W(yp)); + HVX_Vector sum = hvx_vec_qf32_reduce_sum_n(lo, nv1); + rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, sum); + } + + // hvx_vec_dump_fp16("X", x); + // hvx_vec_dump_fp16("Y", y); + // hvx_vec_dump_fp32("SUM", Q6_Vsf_equals_Vqf32(sum)); + // hvx_vec_dump_fp32("RSUM", Q6_Vsf_equals_Vqf32(rsum)); + } else { + rsum = hvx_vec_qf32_reduce_sum(rsum); + } + + *s = hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(rsum)); + +# ifdef HTP_DEBUG + { + float rsum = 0; + const __fp16 * restrict vx = (const __fp16 * restrict) x; + const float * restrict 
vy = (const float * restrict) y; + + for (uint32_t i = 0; i < n; i++) { + rsum += vx[i] * vy[i]; + } + + float diff = fabs(*s - rsum); + if (diff > 0.001) { + FARF(HIGH, "vec-dot-f16-missmatch: %u (%u:%u) expected %.6f got %.6f\n", n, nv0, nv1, rsum, *s); + // htp_dump_f16("x", vx, n); + // htp_dump_f32("y", vy, n); + } + } +# endif +} +#else +static void vec_dot_f16_f32(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const uint32_t fk = 64; + const uint32_t nb = n / fk; + + assert(n % fk == 0); + assert(nb % 4 == 0); + + const uint32_t x_blk_size = 2 * fk; // fp16 + const uint32_t y_blk_size = 4 * fk; // fp32 + + // Row sum (qf32) + HVX_Vector rsum0 = Q6_V_vsplat_R(0); + HVX_Vector rsum1 = Q6_V_vsplat_R(0); + HVX_Vector rsum2 = Q6_V_vsplat_R(0); + HVX_Vector rsum3 = Q6_V_vsplat_R(0); + + for (uint32_t i = 0; i < nb; i += 4) { + HVX_Vector_x4 vx = hvx_vec_load_x4_f16(x + (i * x_blk_size)); + HVX_Vector_x4 vy = hvx_vec_load_x4_f32_as_f16(y + (i * y_blk_size)); + + HVX_VectorPair fa0 = Q6_Wqf32_vmpy_VhfVhf(vx.v[0], vy.v[0]); + HVX_VectorPair fa1 = Q6_Wqf32_vmpy_VhfVhf(vx.v[1], vy.v[1]); + HVX_VectorPair fa2 = Q6_Wqf32_vmpy_VhfVhf(vx.v[2], vy.v[2]); + HVX_VectorPair fa3 = Q6_Wqf32_vmpy_VhfVhf(vx.v[3], vy.v[3]); + + rsum0 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa0), Q6_V_hi_W(fa0))); + rsum1 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum1, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa1), Q6_V_hi_W(fa1))); + rsum2 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum2, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa2), Q6_V_hi_W(fa2))); + rsum3 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum3, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(fa3), Q6_V_hi_W(fa3))); + } + + // Reduce and convert into fp32 + rsum0 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, rsum1); + rsum2 = Q6_Vqf32_vadd_Vqf32Vqf32(rsum2, rsum3); + HVX_Vector rsum = hvx_vec_qf32_reduce_sum(Q6_Vqf32_vadd_Vqf32Vqf32(rsum0, rsum2)); + hvx_vec_store_u(s, 4, Q6_Vsf_equals_Vqf32(rsum)); +} +#endif + +#define htp_matmul_preamble \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t ne10 = src1->ne[0]; \ + const uint32_t ne11 = src1->ne[1]; \ + const uint32_t ne12 = src1->ne[2]; \ + const uint32_t ne13 = src1->ne[3]; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t nb10 = src1->nb[0]; \ + const uint32_t nb11 = src1->nb[1]; \ + const uint32_t nb12 = src1->nb[2]; \ + const uint32_t nb13 = src1->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +// q8x4 src1 tensor is already in VTCM spad +static void matmul(struct htp_matmul_type * mt, + struct htp_tensor * restrict src0, + struct htp_tensor * restrict src1, + struct htp_tensor * restrict dst, + struct htp_spad * restrict src0_spad, + struct htp_spad * restrict src1_spad, + struct htp_spad * restrict dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + dma_queue * dma_queue) { + htp_matmul_preamble; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + const uint32_t src1_nrows = ne11 * ne12 * ne13; // src1 rows + + const uint32_t src0_start_row = 
src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + const size_t dst_row_size = nb1; + const size_t src0_row_size = nb01; + const size_t src1_row_size = q8x4x2_row_size(ne10); + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + + // Per-thread VTCM scratchpads for all tensors + // Note that the entire src1 tensor is already in VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; + uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; + uint8_t * restrict src1_data = src1_spad->data; + + volatile uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const uint8_t * restrict src0_row = (const uint8_t *) src0->data; + + // Prefill spad with src0 rows + #pragma unroll(4) + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const int is0 = (ir0 - src0_start_row); + if (is0 >= HTP_SPAD_SRC0_NROWS) { + break; + } + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + + // Process src0 rows + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const uint8_t * ss0 = dma_queue_pop(dma_queue); + + #pragma unroll(2) + for (uint32_t ir1 = 0; ir1 < src1_nrows; ++ir1) { + const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_row_size); + float * restrict dst_row = (float *) (dst->data + (ir1 * dst_row_size)); + mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); + } + + // Prefetch next (n + spad_nrows) row + const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); + const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; + if (pr0 < src0_end_row_x2) { + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + } + + // Process the last row (if any) + if (src0_end_row != src0_end_row_x2) { + uint32_t ir0 = src0_end_row_x2; + const int is0 = (ir0 - src0_start_row); + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 1); + const uint8_t * ss0 = dma_queue_pop(dma_queue); + + #pragma unroll(2) + for (uint32_t ir1 = 0; ir1 < src1_nrows; ++ir1) { + const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_row_size); + float * restrict dst_row = (float *) (dst->data + (ir1 * dst_row_size)); + mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "matmul-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], + src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +// q8x4x2 src1 tensor is already in VTCM spad +static void matvec(struct htp_matmul_type * mt, + struct htp_tensor * restrict src0, + struct htp_tensor * restrict src1, + struct htp_tensor * restrict dst, + struct htp_spad * restrict src0_spad, + struct htp_spad * restrict src1_spad, + struct htp_spad 
* restrict dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + dma_queue * dma_queue) { + htp_matmul_preamble; + + const uint32_t src0_nrows = ne01; + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + const size_t dst_row_size = nb1; + const size_t src0_row_size = nb01; + const size_t src1_row_size = q8x4x2_row_size(ne10); + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + + // Per-thread VTCM scratchpads for all tensors + // Note that the entire src1 tensor is already in VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + uint8_t * spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; + uint8_t * spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; + uint8_t * src1_data = src1_spad->data; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + float * tmp = (float *) spad_dst; + + const uint8_t * restrict src0_row = (const uint8_t *) src0->data; + const uint8_t * restrict src1_col = (const uint8_t *) src1_data; + float * restrict dst_col = (float *) dst->data; + + // Prefill spad with 2x src0 rows + #pragma unroll(2) + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const uint32_t is0 = (ir0 - src0_start_row); + if (is0 >= HTP_SPAD_SRC0_NROWS) { + break; + } + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + + // Process src0 rows + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const uint8_t * ss0 = dma_queue_pop(dma_queue); + mt->vec_dot_rx2(ne00, &tmp[ir0 - src0_start_row], ss0, src0_row_size_padded, src1_col); + + // Prefetch next (n + spad_nrows) row + const uint32_t pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); + const uint32_t is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; + if (pr0 < src0_end_row_x2) { + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + } + + // Process the last row (if any) + if (src0_end_row != src0_end_row_x2) { + const uint32_t ir0 = src0_end_row_x2; + const uint32_t is0 = (ir0 - src0_start_row); + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 1); + const uint8_t * ss0 = dma_queue_pop(dma_queue); + mt->vec_dot(ne00, &tmp[ir0 - src0_start_row], ss0, src1_col); + } + + hvx_copy_fp32_ua((uint8_t *) &dst_col[src0_start_row], (uint8_t *) tmp, src0_end_row - src0_start_row); + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "matvec-%s %u/%u: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mt->type, ith, nth, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1], + src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ids->ne[0] * ids->ne[1] + (i1)] + +struct mmid_row_mapping { + uint32_t i1; + uint32_t i2; +}; + +// q8x4 src1 tensor is already in VTCM spad +static void matmul_id(struct htp_matmul_type * mt, + struct htp_tensor * restrict src0, + struct 
htp_tensor * restrict src1, + struct htp_tensor * restrict ids, + struct htp_tensor * restrict dst, + struct htp_spad * restrict src0_spad, + struct htp_spad * restrict src1_spad, + struct htp_spad * restrict src2_spad, + struct htp_spad * restrict dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + dma_queue * dma_queue) { + htp_matmul_preamble; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const uint32_t src0_nrows = ne01; // src0 rows per expert + const uint32_t src1_nrows = ne11; + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + const uint32_t n_ids = ids->ne[0]; // n_expert_used + const uint32_t n_as = ne02; // n_expert + + const size_t matrix_row_counts_size = n_as * sizeof(uint32_t); + const size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping); + + const uint32_t * matrix_row_counts = (const uint32_t *) src2_spad->data + 0; + const struct mmid_row_mapping * matrix_rows = (const void *) src2_spad->data + matrix_row_counts_size; + + const size_t dst_row_size = nb1; + const size_t src0_row_size = nb01; + const size_t src1_row_size = q8x4x2_row_size(ne10); + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + + // Per-thread VTCM scratchpads for all tensors + // Note that the entire src1 tensor is already in VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; + uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; + uint8_t * restrict src1_data = src1_spad->data; + + for (uint32_t cur_a = 0; cur_a < n_as; ++cur_a) { + const int32_t cne1 = matrix_row_counts[cur_a]; + + if (cne1 == 0) { + continue; + } + + const uint8_t * src0_row = (const uint8_t *) src0->data + (0 + cur_a * nb02 + 0); + + // Prefill spad with src0 rows + #pragma unroll(4) + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const int is0 = (ir0 - src0_start_row); + if (is0 >= HTP_SPAD_SRC0_NROWS) { + break; + } + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + + // Process src0 rows + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const uint8_t * ss0 = dma_queue_pop(dma_queue); + + for (uint32_t cid = 0; cid < cne1; ++cid) { + struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid); + const int rm1 = row_mapping.i1; // expert idx + const int rm2 = row_mapping.i2; // token idx + + const uint32_t ir1 = src1_nrows == 1 ? 
0 : rm1; // src1 row idx + const uint8_t * restrict src1_col = + (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size); + float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0)); + + mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); + } + + // Prefetch next (n + spad_nrows) row + const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); + const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; + if (pr0 < src0_end_row_x2) { + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + } + + // Process the last row (if any) + if (src0_end_row != src0_end_row_x2) { + uint32_t ir0 = src0_end_row_x2; + const uint32_t is0 = (ir0 - src0_start_row); + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 1); + const uint8_t * ss0 = dma_queue_pop(dma_queue); + + for (uint32_t cid = 0; cid < cne1; ++cid) { + struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid); + const int rm1 = row_mapping.i1; // expert idx + const int rm2 = row_mapping.i2; // token idx + + const uint32_t ir1 = src1_nrows == 1 ? 0 : rm1; // src1 row idx + const uint8_t * restrict src1_col = + (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size); + float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0)); + + mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); + } + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "matmul-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mt->type, + ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], + src1->ne[1], src1->ne[2], src1->ne[3], ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1], + dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +// q8x4 src1 tensor is already in VTCM spad +static void matvec_id(struct htp_matmul_type * mt, + struct htp_tensor * restrict src0, + struct htp_tensor * restrict src1, + struct htp_tensor * restrict src2, + struct htp_tensor * restrict dst, + struct htp_spad * restrict src0_spad, + struct htp_spad * restrict src1_spad, + struct htp_spad * restrict src2_spad, + struct htp_spad * restrict dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + dma_queue * dma_queue) { + htp_matmul_preamble; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const uint32_t src0_nrows = ne01; // src0 rows per expert + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + assert(ne13 % ne03 == 0); + + const size_t dst_row_size = nb1; + const size_t src0_row_size = nb01; + const size_t src1_row_size = q8x4x2_row_size(ne10); + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + + const uint32_t n_aids = src2->ne[0]; // num activated experts + const uint32_t n_ids = ne02; // num experts + + // Per-thread VTCM scratchpads for all tensors + // Note that the entire src1 tensor is already in VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; 
+ uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith; + uint8_t * restrict src1_data = src1_spad->data; + + for (uint32_t ie1 = 0; ie1 < n_aids; ++ie1) { // for each expert + const uint32_t eid = *(const int32_t *) ((const uint8_t *) src2->data + ie1 * src2->nb[0]); + assert(eid < n_ids); + + const uint8_t * restrict src0_row = (const uint8_t *) src0->data + eid * nb02; + const uint8_t * restrict src1_col = (const uint8_t *) src1_data; + float * restrict dst_row = (float *) (dst->data + ie1 * nb1); + + // Prefill spad with src0 rows + #pragma unroll(4) + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const int is0 = (ir0 - src0_start_row); + if (is0 >= HTP_SPAD_SRC0_NROWS) { + break; + } + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + + // Process src0 rows + for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) { + const uint8_t * ss0 = dma_queue_pop(dma_queue); + mt->vec_dot_rx2(ne00, &dst_row[ir0], ss0, src0_row_size_padded, src1_col); + + // Prefetch next (n + spad_nrows) row + const int pr0 = (ir0 + HTP_SPAD_SRC0_NROWS); + const int is0 = (pr0 - src0_start_row) % HTP_SPAD_SRC0_NROWS; + if (pr0 < src0_end_row_x2) { + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size, + src0_row_size_padded, src0_row_size, 2); + } + } + + // Process the last row (if any) + if (src0_end_row != src0_end_row_x2) { + uint32_t ir0 = src0_end_row_x2; + const uint32_t is0 = (ir0 - src0_start_row); + dma_queue_push(dma_queue, spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size, + src0_row_size_padded, src0_row_size, 1); + const uint8_t * ss0 = dma_queue_pop(dma_queue); + mt->vec_dot(ne00, &dst_row[ir0], ss0, src1_col); + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "matvec-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mt->type, + ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], + src1->ne[1], src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], + dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +// *** matmul in fp16 + +static void matmul_f16_f32(struct htp_tensor * restrict src0, + struct htp_tensor * restrict src1, + struct htp_tensor * restrict dst, + struct htp_spad * restrict src0_spad, + struct htp_spad * restrict src1_spad, + struct htp_spad * restrict dst_spad, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread, + dma_queue * dma_queue) { + htp_matmul_preamble; + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + const size_t src0_row_size = sizeof(__fp16) * ne00; + const size_t src1_row_size = sizeof(float) * ne10; + + assert(ne12 % ne02 == 0); + assert(ne13 % ne03 == 0); + + // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers) + const uint32_t nr0 = ne0; + + // This is the size of the rest of the dimensions of the result + const uint32_t nr1 = ne1 * ne2 * ne3; + + uint32_t chunk_size = 64; + + // distribute the thread work across the inner or outer loop based on which one is larger + uint32_t nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows + uint32_t nchunk1 = nr0 > nr1 ? 
1 : nth; // parallelize by src1 rows + + // The number of elements in each chunk + const uint32_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; + const uint32_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; + + uint32_t current_chunk = ith; + + const uint32_t ith0 = current_chunk % nchunk0; + const uint32_t ith1 = current_chunk / nchunk0; + + const uint32_t ir0_start = dr0 * ith0; + const uint32_t ir0_end = MIN(ir0_start + dr0, nr0); + + const uint32_t ir1_start = dr1 * ith1; + const uint32_t ir1_end = MIN(ir1_start + dr1, nr1); + + // broadcast factors + const uint32_t r2 = ne12 / ne02; + const uint32_t r3 = ne13 / ne03; + + // no work for this thread + if (ir0_start >= ir0_end || ir1_start >= ir1_end) { + return; + } + + // block-tiling attempt + const uint32_t blck_0 = 64; + const uint32_t blck_1 = 64; + + float tmp[32]; + + for (uint32_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { + for (uint32_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { + for (uint32_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1++) { + const uint32_t i13 = (ir1 / (ne12 * ne1)); + const uint32_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; + const uint32_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); + + // broadcast src0 into src1 + const uint32_t i03 = i13 / r3; + const uint32_t i02 = i12 / r2; + + const uint32_t i1 = i11; + const uint32_t i2 = i12; + const uint32_t i3 = i13; + + const uint8_t * restrict src0_row = (const uint8_t *) src0->data + (0 + i02 * nb02 + i03 * nb03); + const uint8_t * restrict src1_col = + (const uint8_t *) src1->data + (i11 + i12 * ne11 + i13 * ne12 * ne11) * src1_row_size; + float * dst_col = (float *) ((uint8_t * restrict) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + for (uint32_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0++) { + vec_dot_f16_f32(ne00, &tmp[ir0 - iir0], src0_row + ir0 * src0_row_size, src1_col); + } + + hvx_copy_fp32_ua((uint8_t *) &dst_col[iir0], (uint8_t *) tmp, MIN(iir0 + blck_0, ir0_end) - iir0); + } + } + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "matmul-f16-f32 %d/%d: %ux%ux%ux%u (%u:%u %u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], ir0_start, ir0_end, ir1_start, ir1_end, src1->ne[0], + src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +// *** dynamic quant + +static inline void quantize_block_fp32_q8x4(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) { + assert((unsigned long) x % 128 == 0); + assert((unsigned long) y_q % 128 == 0); + + HVX_Vector * vx = (HVX_Vector *) x; + + // Load and convert into QF32 + HVX_Vector zero = Q6_V_vsplat_R(0); + HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements + HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements + HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements + HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements + + // Convert into fp16 + HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf))); + HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf))); + + // Compute max and scale + HVX_Vector vmax_hf = hvx_vec_reduce_max_fp16(hvx_vec_abs_fp16(vx01_hf)); + vmax_hf = hvx_vec_reduce_max2_fp16(hvx_vec_abs_fp16(vx23_hf), vmax_hf); + + // Replicate first fp16 scale across all lanes + HVX_Vector ctrl = *(const HVX_Vector *) repl_1x_fp16; + vmax_hf = Q6_V_vdelta_VV(vmax_hf, ctrl); + + 
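+    // Per-block scale: d = max(|x|) / 127 (the 0x2008 splat below is ~1/127 in fp16);
+    // the block is then divided by d and rounded/saturated to int8.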
HVX_Vector vd_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0 + HVX_Vector vd_hf = Q6_Vhf_equals_Vqf16(vd_qf16); + + *(HVX_UVector *) y_d = vd_hf; + + // Divide input by the scale + HVX_Vector vd_inv_hf = hvx_vec_inverse_fp16(vd_hf); + vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd_inv_hf)); + vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd_inv_hf)); + + // Convert to int8 + HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf); + HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf); + HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16); + + *(HVX_Vector *) y_q = vx_i8; +} + +// Overrides input x +static void quantize_row_fp32_q8x4x2(float * restrict x, uint8_t * restrict y, uint32_t k) { + assert(k % 32 == 0); + const uint32_t qk = QK_Q8_0x4x2; + const uint32_t nb = (k + qk - 1) / qk; + + const uint32_t qrow_size = k; // int8 + + const uint32_t dblk_size = 8 * 2; // 8x __fp16 + const uint32_t qblk_size = QK_Q8_0x4x2; // int8 + + uint8_t * restrict y_q = (y + 0); // quants first + uint8_t * restrict y_d = (y + qrow_size); // then scales + + // Temp scales override input since we're working off of the aligned temp buffer in VTCM + uint8_t * restrict t_d = (uint8_t *) x; + + for (uint32_t i = 0; i < nb; i++) { + quantize_block_fp32_q8x4(x + (i * 2 + 0) * qk / 2, y_q + (i * 2 + 0) * qblk_size / 2, + t_d + (i * 2 + 0) * dblk_size / 2); + quantize_block_fp32_q8x4(x + (i * 2 + 1) * qk / 2, y_q + (i * 2 + 1) * qblk_size / 2, + t_d + (i * 2 + 1) * dblk_size / 2); + } + + // now copy the scales into final location + hvx_copy_fp16_ua(y_d, t_d, nb * 8); +} + +static void quantize_fp32_q8x4x2(const struct htp_tensor * src, + uint8_t * restrict dst, + struct htp_spad * spad, + uint32_t nth, + uint32_t ith, + uint32_t nrows_per_thread) { + uint64_t t1 = HAP_perf_get_qtimer_count(); + + const uint32_t ne0 = src->ne[0]; + const uint32_t ne1 = src->ne[1]; + const uint32_t ne2 = src->ne[2]; + const uint32_t ne3 = src->ne[3]; + + const uint32_t nrows = ne1 * ne2 * ne3; // total n_rows + + const uint32_t ir_first = nrows_per_thread * ith; // first row + const uint32_t ir_last = MIN(ir_first + nrows_per_thread, nrows); // last row + + const size_t src_row_size = src->nb[1]; + const size_t dst_row_size = q8x4x2_row_size(ne0); + + uint8_t * restrict src_data = (uint8_t *) src->data + (src_row_size * ir_first); + uint8_t * restrict dst_data = (uint8_t *) dst + (dst_row_size * ir_first); + uint8_t * restrict tmp_data = (uint8_t *) spad->data + (spad->size_per_thread * ith); + + const size_t src_row_size_padded = htp_round_up(src_row_size, QK_Q8_0x4x2 * sizeof(float)); + memset(tmp_data, 0, src_row_size_padded); // zero-out temp row data for padding + + for (uint32_t i = ir_first; i < ir_last; ++i) { + htp_l2fetch(src_data, 2, src_row_size, src_row_size); + hvx_copy_fp32_aa(tmp_data, src_data, ne0); + + // FARF(HIGH, "quantize-q8x4-row: %u\n", i); + quantize_row_fp32_q8x4x2((float *) tmp_data, dst_data, ne0); + dst_data += dst_row_size; + src_data += src_row_size; + } + + uint64_t t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "quantize-fp32-q8x4: %u/%u : n-rows %u (%u:%u) row-size %u -> %u usec %u\n", ith, nth, nrows, ir_first, + ir_last, src_row_size, dst_row_size, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void htp_quantize_fp32_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + quantize_fp32_q8x4x2(&octx->src1, octx->src1_spad.data, &octx->src0_spad, n, i, 
octx->src1_nrows_per_thread); +} + +// ** matmul callbacks for worker_pool + +static void htp_matvec_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; + + matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; + + matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matvec_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q8x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q8x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; + + matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q8x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q8x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; + + matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matvec_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "mxfp4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; + + matvec(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "mxfp4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; + + matmul(&mt, &octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_f16_f32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + matmul_f16_f32(&octx->src0, &octx->src1, &octx->dst, &octx->src0_spad, &octx->src1_spad, &octx->dst_spad, n, i, + octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +// ** matmul-id callbacks for worker_pool + +static void htp_matvec_id_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; + + matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void 
htp_matmul_id_q4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q4x4x2_q8x4x2_rx2; + + matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matvec_id_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q8x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q8x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; + + matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_id_q8x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "q8x4x2-q8x4x2"; + mt.vec_dot = vec_dot_q8x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_q8x4x2_q8x4x2_rx2; + + matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matvec_id_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "mxfp4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; + + matvec_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +static void htp_matmul_id_mxfp4x4x2_q8x4x2(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = data; + + struct htp_matmul_type mt; + mt.type = "mxfp4x4x2-q8x4x2"; + mt.vec_dot = vec_dot_mxfp4x4x2_q8x4x2; + mt.vec_dot_rx2 = vec_dot_mxfp4x4x2_q8x4x2_rx2; + + matmul_id(&mt, &octx->src0, &octx->src1, &octx->src2, &octx->dst, &octx->src0_spad, &octx->src1_spad, + &octx->src2_spad, &octx->dst_spad, n, i, octx->src0_nrows_per_thread, octx->ctx->dma[i]); +} + +// ** main matmul entry point + +int op_matmul(struct htp_ops_context * octx) { + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + htp_matmul_preamble; + + const char * op_type; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; + const uint32_t src1_nrows = ne11 * ne12 * ne13; + + const size_t src0_row_size = nb01; + const size_t dst_row_size = nb1; + size_t src1_row_size = nb11; + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + size_t src1_row_size_padded; + + worker_callback_t quant_job_func; + worker_callback_t matmul_job_func; + + bool need_quant = !(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE); + + switch (src0->type) { + case HTP_TYPE_Q4_0: + op_type = "q4x4x2-fp32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + if (src1_nrows > 1) { + matmul_job_func = htp_matmul_q4x4x2_q8x4x2; + } else { + matmul_job_func = htp_matvec_q4x4x2_q8x4x2; + } + + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + + // Entire src1 tensor is placed into the VTCM + // For other tensors we allocate N rows per thread, padded to HVX 
vector size + + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + case HTP_TYPE_Q8_0: + op_type = "q8x4x2-fp32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + if (src1_nrows > 1) { + matmul_job_func = htp_matmul_q8x4x2_q8x4x2; + } else { + matmul_job_func = htp_matvec_q8x4x2_q8x4x2; + } + + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + + // Entire src1 tensor is placed into the VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + case HTP_TYPE_MXFP4: + op_type = "mxfp4x4x2-f32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + if (src1_nrows > 1) { + matmul_job_func = htp_matmul_mxfp4x4x2_q8x4x2; + } else { + matmul_job_func = htp_matvec_mxfp4x4x2_q8x4x2; + } + + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + + // Entire src1 tensor is placed into the VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + case HTP_TYPE_F16: + op_type = "f16-f32"; + quant_job_func = NULL; // htp_quantize_f32_f16; + matmul_job_func = htp_matmul_f16_f32; + + // For all tensors we allocate N rows per thread, padded to HVX vector size + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread 
= htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size, 256); + octx->src1_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC1_NROWS * src1_row_size, 256); + + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->src1_spad.size = octx->src1_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + + need_quant = false; + break; + + default: + return HTP_STATUS_NO_SUPPORT; + } + + // VTCM scratchpads for all tensors + size_t spad_size = octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size; + + FARF(HIGH, "matmul-%s : src0-spad-size %u src1-spad-size %u dst-spad-size %u (%zu)\n", op_type, + octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size, spad_size); + + FARF(HIGH, "matmul-%s : %ux%ux%ux%u * %ux%ux%ux%u-> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", op_type, src0->ne[0], + src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], + dst->ne[1], dst->ne[2], dst->ne[3], src0->data, src1->data, dst->data); + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "matmul-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, + octx->ctx->vtcm_size, spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; + + octx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads; + octx->src0_nrows_per_thread += (octx->src0_nrows_per_thread & 1); // round up to even + + if (need_quant) { + // Run quant jobs + const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads); + octx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs; + worker_pool_run_func(octx->ctx->worker_pool, quant_job_func, octx, n_quant_jobs); + } + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + // Run matmul jobs + const uint32_t n_matmul_jobs = octx->n_threads; + worker_pool_run_func(octx->ctx->worker_pool, matmul_job_func, octx, n_matmul_jobs); + } + + return HTP_STATUS_OK; +} + +// ** main matmul-id entry point + +int op_matmul_id(struct htp_ops_context * octx) { + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + const struct htp_tensor * ids = &octx->src2; + struct htp_tensor * dst = &octx->dst; + + htp_matmul_preamble; + + const char * op_type; + + worker_callback_t quant_job_func; + worker_callback_t matmul_id_job_func; + + const size_t src0_row_size = nb01; + const size_t dst_row_size = nb1; + + const size_t src0_row_size_padded = htp_round_up(src0_row_size, 128); + + const uint32_t src0_nrows = ne01; // per expert + const uint32_t src1_nrows = ne11 * ne12 * ne13; + + size_t src1_row_size; + size_t src1_row_size_padded; + + // row groups + const int n_ids = ids->ne[0]; // n_expert_used + const int n_as = ne02; // n_expert + + size_t matrix_row_counts_size = n_as * sizeof(uint32_t); + size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping); + + switch (src0->type) { + case HTP_TYPE_Q4_0: + op_type = "q4x2x2-f32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + if (src1_nrows > 1) { + matmul_id_job_func = htp_matmul_id_q4x4x2_q8x4x2; + } else { + matmul_id_job_func = htp_matvec_id_q4x4x2_q8x4x2; + } + + // Entire src1 tensor is placed into 
the VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src2_spad.size = octx->src2_spad.size_per_thread; + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + case HTP_TYPE_Q8_0: + op_type = "q8x2x2-f32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + if (src1_nrows > 1) { + matmul_id_job_func = htp_matmul_id_q8x4x2_q8x4x2; + } else { + matmul_id_job_func = htp_matvec_id_q8x4x2_q8x4x2; + } + + // Entire src1 tensor is placed into the VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src2_spad.size = octx->src2_spad.size_per_thread; + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + case HTP_TYPE_MXFP4: + op_type = "mxfp4x2x2-f32"; + quant_job_func = htp_quantize_fp32_q8x4x2; + src1_row_size = q8x4x2_row_size(ne10); // row size post quantization + if (src1_nrows > 1) { + matmul_id_job_func = htp_matmul_id_mxfp4x4x2_q8x4x2; + } else { + matmul_id_job_func = htp_matvec_id_mxfp4x4x2_q8x4x2; + } + + // Entire src1 tensor is placed into the VTCM + // For other tensors we allocate N rows per thread, padded to HVX vector size + octx->dst_spad.size_per_thread = htp_round_up(HTP_SPAD_DST_NROWS * dst_row_size, 256); + octx->src0_spad.size_per_thread = htp_round_up(HTP_SPAD_SRC0_NROWS * src0_row_size_padded, 256); + octx->src1_spad.size_per_thread = htp_round_up(src1_row_size * src1_nrows, 256); + octx->src2_spad.size_per_thread = htp_round_up(matrix_row_counts_size + matrix_row_map_size, 256); + + // src0 spad is also used in dynamic quantizer to store padded src1 rows + src1_row_size_padded = htp_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float)); + if (octx->src0_spad.size_per_thread < src1_row_size_padded) { + octx->src0_spad.size_per_thread = src1_row_size_padded; + } + + octx->src2_spad.size = 
octx->src2_spad.size_per_thread; + octx->src1_spad.size = octx->src1_spad.size_per_thread; + octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads; + octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads; + break; + + default: + return HTP_STATUS_NO_SUPPORT; + } + + size_t spad_size = octx->src2_spad.size + octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size; + + FARF(HIGH, "matmul-id-%s : src0-spad-size %u src1-spad-size %u src2-spad-size %u dst-spad-size %u (%zu)\n", op_type, + octx->src0_spad.size, octx->src1_spad.size, octx->src2_spad.size, octx->dst_spad.size, spad_size); + + FARF(HIGH, "matmul-id-%s : %ux%ux%ux%u * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", op_type, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], + ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], src0->data, + src1->data, dst->data); + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "matmul-id-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, + octx->ctx->vtcm_size, spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->src2_spad.data = octx->src1_spad.data + octx->src1_spad.size; + octx->dst_spad.data = octx->src2_spad.data + octx->src2_spad.size; + + octx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads; + octx->src0_nrows_per_thread += (octx->src0_nrows_per_thread & 1); // round up to even + + if (src1_nrows > 1) { + // initialize matrix_row_counts and map + uint32_t * matrix_row_counts = (uint32_t *) octx->src2_spad.data + 0; + struct mmid_row_mapping * matrix_rows = (void *) octx->src2_spad.data + matrix_row_counts_size; + + memset(matrix_row_counts, 0, n_as * sizeof(uint32_t)); + + // group rows by src0 matrix + for (uint32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { // token idx + for (uint32_t id = 0; id < n_ids; ++id) { // expert idx + const uint32_t i02 = + *(const uint32_t *) ((const uint8_t *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); + + assert(i02 >= 0 && i02 < n_as); + + MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) { id, iid1 }; + matrix_row_counts[i02] += 1; + } + } + } + + // Setup worker pool callbacks + if (!(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE)) { + // Run quant jobs + const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads); + octx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs; + worker_pool_run_func(octx->ctx->worker_pool, quant_job_func, octx, n_quant_jobs); + } + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + // Run matmul-id jobs + const uint32_t n_matmul_jobs = octx->n_threads; + worker_pool_run_func(octx->ctx->worker_pool, matmul_id_job_func, octx, n_matmul_jobs); + } + + return HTP_STATUS_OK; +} diff --git a/ggml/src/ggml-hexagon/htp/ops-utils.h b/ggml/src/ggml-hexagon/htp/ops-utils.h new file mode 100644 index 0000000000000..f03ff34028f22 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/ops-utils.h @@ -0,0 +1,116 @@ +#ifndef OPS_UTILS_H +#define OPS_UTILS_H + +#include "htp-msg.h" + +#ifndef MAX +# define MAX(a, b) ((a) > (b) ? (a) : (b)) +#endif + +#ifndef MIN +# define MIN(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + +static inline uint64_t htp_get_cycles() { + uint64_t cycles = 0; + asm volatile(" %0 = c15:14\n" : "=r"(cycles)); + return cycles; +} + +static inline uint64_t htp_get_pktcnt() { + uint64_t pktcnt; + asm volatile(" %0 = c19:18\n" : "=r"(pktcnt)); + return pktcnt; +} + +static inline int32_t htp_is_aligned(void * addr, uint32_t align) { + return ((size_t) addr & (align - 1)) == 0; +} + +static inline uint32_t htp_round_up(uint32_t n, uint32_t m) { + return m * ((n + m - 1) / m); +} + +static inline void htp_l2fetch(const void * p, uint32_t height, uint32_t width, uint32_t stride) { + const uint64_t control = Q6_P_combine_RR(stride, Q6_R_combine_RlRl(width, height)); + asm volatile(" l2fetch(%0,%1) " : : "r"(p), "r"(control)); +} + +static inline int32_t htp_is_one_chunk(void * addr, uint32_t n, uint32_t chunk_size) { + uint32_t left_off = (size_t) addr & (chunk_size - 1); + uint32_t right_off = left_off + n; + return right_off <= chunk_size; +} + +static inline void htp_dump_int8_line(char * pref, const int8_t * x, int n) { + char str[1024], *p = str; + p += sprintf(p, "%s: ", pref); + for (int i = 0; i < 16; i++) { + p += sprintf(p, "%d, ", x[i]); + } + FARF(HIGH, "%s\n", str); +} + +static inline void htp_dump_uint8_line(char * pref, const uint8_t * x, uint32_t n) { + char str[1024], *p = str; + p += sprintf(p, "%s: ", pref); + for (int i = 0; i < n; i++) { + p += sprintf(p, "%d, ", x[i]); + } + FARF(HIGH, "%s\n", str); +} + +static inline void htp_dump_int32_line(char * pref, const int32_t * x, uint32_t n) { + char str[1024], *p = str; + p += sprintf(p, "%s: ", pref); + for (int i = 0; i < n; i++) { + p += sprintf(p, "%d, ", (int) x[i]); + } + FARF(HIGH, "%s\n", str); +} + +static inline void htp_dump_fp16_line(char * pref, const __fp16 * x, uint32_t n) { + char str[1024], *p = str; + p += sprintf(p, "%s: ", pref); + for (int i = 0; i < n; i++) { + p += sprintf(p, "%.6f, ", (float) x[i]); + } + FARF(HIGH, "%s\n", str); +} + +static inline void htp_dump_fp32_line(char * pref, const float * x, uint32_t n) { + char str[1024], *p = str; + p += sprintf(p, "%s: ", pref); + for (int i = 0; i < n; i++) { + p += sprintf(p, "%.6f, ", x[i]); + } + FARF(HIGH, "%s\n", str); +} + +static inline void htp_dump_f32(char * pref, const float * x, uint32_t n) { + uint32_t n0 = n / 16; + uint32_t n1 = n % 16; + + uint32_t i = 0; + for (; i < n0; i++) { + htp_dump_fp32_line(pref, x + (16 * i), 16); + } + if (n1) { + htp_dump_fp32_line(pref, x + (16 * i), n1); + } +} + +static inline void htp_dump_f16(char * pref, const __fp16 * x, uint32_t n) { + uint32_t n0 = n / 16; + uint32_t n1 = n % 16; + + uint32_t i = 0; + for (; i < n0; i++) { + htp_dump_fp16_line(pref, x + (16 * i), 16); + } + if (n1) { + htp_dump_fp16_line(pref, x + (16 * i), n1); + } +} + +#endif /* OPS_UTILS_H */ diff --git a/ggml/src/ggml-hexagon/htp/rope-ops.c b/ggml/src/ggml-hexagon/htp/rope-ops.c new file mode 100644 index 0000000000000..16afa50f5b04f --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/rope-ops.c @@ -0,0 +1,418 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + 
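+// RoPE (rotary position embedding) over fp32 rows: rope_cache_init() builds a per-row
+// cos/sin cache (with YaRN extrapolation handling), which is then applied either by the
+// HVX fast path in hvx_calc_rope_f32() or by the scalar fallback in rope_hex_f32().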
+#define htp_rope_preamble \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +struct rope_th_ctx { + int32_t n_dims; + int32_t mode; + int32_t n_ctx_orig; + int32_t sections[4]; + + float freq_base; + float freq_scale; + float ext_factor; + float attn_factor; + float beta_fast; + float beta_slow; + float theta_scale; + float corr_dims[2]; + + struct htp_ops_context * octx; +}; + +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / MAX(0.001f, high - low); + + return (1 - MIN(1, MAX(0, y))); +} + +static void rope_cache_init(const float theta_base, + float freq_scale, + const float * freq_factors, + float * corr_dims, + uint32_t ne0, + float ext_factor, + float mscale, + float * cache, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta = theta_base; + + for (uint32_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + + float theta_extrap = theta / ff; + + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta2 = theta_interp; + + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta2 = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + + cache[i0 + 0] = cosf(theta2) * mscale; + cache[i0 + 1] = sinf(theta2) * mscale; + + theta *= theta_scale; + } +} + +#define M_PI 3.1415926535897932384626433 + +static void rope_corr_dims(int n_dims, + int n_ctx_orig, + float freq_base, + float beta_fast, + float beta_slow, + float * dims) { + float start = floorf(n_dims * logf(n_ctx_orig / (beta_fast * 2 * (float) M_PI)) / (2 * logf(freq_base))); + float end = ceilf(n_dims * logf(n_ctx_orig / (beta_slow * 2 * (float) M_PI)) / (2 * logf(freq_base))); + dims[0] = MAX(0, start); + dims[1] = MIN(n_dims - 1, end); +} + +static void init_rope_ctx(struct rope_th_ctx * rope_ctx, struct htp_ops_context * octx) { + memset(rope_ctx, 0, sizeof(struct rope_th_ctx)); + + const int32_t * op_params = &octx->op_params[0]; + + rope_ctx->n_dims = ((const int32_t *) op_params)[1]; + rope_ctx->mode = ((const int32_t *) op_params)[2]; + rope_ctx->n_ctx_orig = ((const int32_t *) op_params)[4]; + + memcpy(&rope_ctx->freq_base, (int32_t *) op_params + 5, sizeof(float)); + memcpy(&rope_ctx->freq_scale, (int32_t *) op_params + 6, sizeof(float)); + memcpy(&rope_ctx->ext_factor, (int32_t *) op_params + 7, sizeof(float)); + memcpy(&rope_ctx->attn_factor, (int32_t *) op_params + 8, sizeof(float)); + memcpy(&rope_ctx->beta_fast, (int32_t *) op_params + 9, sizeof(float)); + memcpy(&rope_ctx->beta_slow, (int32_t *) op_params + 10, sizeof(float)); + memcpy(&rope_ctx->sections, (int32_t *) op_params + 11, sizeof(int) * 4); + + 
rope_ctx->theta_scale = powf(rope_ctx->freq_base, -2.0f / rope_ctx->n_dims); + + rope_corr_dims(rope_ctx->n_dims, rope_ctx->n_ctx_orig, rope_ctx->freq_base, rope_ctx->beta_fast, + rope_ctx->beta_slow, rope_ctx->corr_dims); + + rope_ctx->octx = octx; + FARF(HIGH, "rope-f32 n_dims:%d, ext_factor:%.6f, theta_scale:%.6f, attn_factor:%.6f\n", rope_ctx->n_dims, + rope_ctx->ext_factor, rope_ctx->theta_scale, rope_ctx->attn_factor); +} + +static void hvx_calc_rope_f32(const float * restrict src0, + float * restrict dst, + const int num_elems, + const float * restrict theta_cache) { + // for (int i = 0; i < num_elems; i += 2) { + //const float cos_theta = theta_cache[i + 0]; + //const float sin_theta = theta_cache[i + 1]; + + //const float x0 = src[0]; + //const float x1 = src[1]; + + //dst[0] = x0*cos_theta - x1*sin_theta; + //dst[1] = x0*sin_theta + x1*cos_theta; + + //src += 2; + //dst += 2; + // } + + const uint8_t * restrict src0_curr = (const uint8_t *) src0; + const uint8_t * restrict theta_curr = (const uint8_t *) theta_cache; + uint8_t * restrict dst_curr = (uint8_t *) dst; + + int step_of_1 = num_elems >> 6; // 6 because we process two vectors at once + + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v0 = *(HVX_Vector *) src0_curr; + HVX_Vector v1 = *(HVX_Vector *) (src0_curr + VLEN); + + HVX_Vector v2 = *(HVX_Vector *) theta_curr; + HVX_Vector v3 = *(HVX_Vector *) (theta_curr + VLEN); + + HVX_VectorPair vx0_x1 = Q6_W_vdeal_VVR(v1, v0, -4); // vx0_x1[0] = x0, vx0_x1[1] = x1 + HVX_VectorPair vcos_sin = Q6_W_vdeal_VVR(v3, v2, -4); // vcos_sin[0] = cos_theta, vcos_sin[1] = sin_theta + + HVX_Vector vx0_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_lo_W(vcos_sin)); + HVX_Vector vx0_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_lo_W(vx0_x1), Q6_V_hi_W(vcos_sin)); + HVX_Vector vx1_c = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_lo_W(vcos_sin)); + HVX_Vector vx1_s = Q6_Vqf32_vmpy_VsfVsf(Q6_V_hi_W(vx0_x1), Q6_V_hi_W(vcos_sin)); + + HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s); + HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c); + + HVX_VectorPair vstore = Q6_W_vshuff_VVR(Q6_Vsf_equals_Vqf32(v5), Q6_Vsf_equals_Vqf32(v4), -4); + + *(HVX_Vector *) dst_curr = Q6_V_lo_W(vstore); + *(HVX_Vector *) (dst_curr + VLEN) = Q6_V_hi_W(vstore); + + src0_curr += 2 * VLEN; + theta_curr += 2 * VLEN; + dst_curr += 2 * VLEN; + } +} + +static void rope_hex_f32(struct rope_th_ctx * rope_ctx, + const uint32_t ir0, + const uint32_t ir1, + int nth, + int ith, + int opt_path) { + struct htp_ops_context * octx = rope_ctx->octx; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + const struct htp_tensor * src2 = &octx->src2; + struct htp_tensor * dst = &octx->dst; + + htp_rope_preamble; + + const int32_t * pos = (const int32_t *) src1->data; + + float * wp0 = (float *) (octx->src0_spad.data + (ith * nb01)); + + const float * freq_factors = NULL; + if (src2 != NULL) { + freq_factors = (const float *) src2->data; + } + + int ir = 0; + + for (uint32_t i3 = 0; i3 < ne3; i3++) { // batch + for (uint32_t i2 = 0; i2 < ne2; i2++) { // seq-len + const int32_t p = pos[i2]; + + rope_cache_init(p, rope_ctx->freq_scale, freq_factors, rope_ctx->corr_dims, ne0, rope_ctx->ext_factor, + rope_ctx->attn_factor, wp0, rope_ctx->theta_scale); + + for (uint32_t i1 = 0; i1 < ne1; i1++) { // attn-heads + if (ir++ < ir0) { + continue; + } + if (ir > ir1) { + break; + } + + const float * src = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + float * dst_data = (float *) 
((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + + const float * src_loc = src; + float * dst_data_loc = dst_data; + + if (1 == opt_path) { + hvx_calc_rope_f32(src_loc, dst_data_loc, rope_ctx->n_dims, wp0); + } else { + for (uint32_t i0 = 0; i0 < rope_ctx->n_dims; i0 += 2) { + const float cos_theta = wp0[i0 + 0]; + const float sin_theta = wp0[i0 + 1]; + + const float x0 = src_loc[0]; + const float x1 = src_loc[1]; + + dst_data_loc[0] = x0 * cos_theta - x1 * sin_theta; + dst_data_loc[1] = x0 * sin_theta + x1 * cos_theta; + + src_loc += 2; + dst_data_loc += 2; + } + } + + for (uint32_t i0 = rope_ctx->n_dims; i0 < ne0; i0 += 2) { + dst_data_loc[0] = src_loc[0]; + dst_data_loc[1] = src_loc[1]; + + src_loc += 2; + dst_data_loc += 2; + } + } + } + } +} + +static void rope_job_f32_per_thread(struct rope_th_ctx * rope_ctx, int nth, int ith) { + struct htp_ops_context * octx = rope_ctx->octx; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + htp_rope_preamble; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + int is_aligned = 1; + int opt_path = 0; + if ((0 == htp_is_aligned((void *) src0->data, VLEN)) || (0 == htp_is_aligned((void *) src1->data, VLEN)) || + (0 == htp_is_aligned((void *) dst->data, VLEN))) { + FARF(HIGH, "rope-f32: unaligned addresses in rope op, possibly slower execution\n"); + is_aligned = 0; + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + rope_hex_f32(rope_ctx, src0_start_row, src0_end_row, nth, ith, opt_path); + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "rope-f32: %d/%d/%d: (%u:%u) usec %u\n", ith, nth, opt_path, src0_start_row, src0_end_row, + (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void rope_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { + struct rope_th_ctx * rope_ctx = (struct rope_th_ctx *) data; + + rope_job_f32_per_thread(rope_ctx, n, i); +} + +static int execute_op_rope_f32(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + const struct htp_tensor * src2 = &octx->src2; + struct htp_tensor * dst = &octx->dst; + + worker_callback_t op_func; + const char * op_type = NULL; + + struct rope_th_ctx rope_ctx; + + switch (octx->op) { + case HTP_OP_ROPE: + op_func = rope_job_dispatcher_f32; + op_type = "rope-f32"; + + init_rope_ctx(&rope_ctx, octx); + break; + + default: + FARF(ERROR, "Unsupported Op %u\n", octx->op); + return HTP_STATUS_NO_SUPPORT; + } + + const uint32_t n_threads = octx->n_threads; + + const size_t src0_row_size = src0->nb[1]; + const size_t src1_row_size = src0_row_size; + const size_t dst_row_size = dst->nb[1]; + + // VTCM scratchpads for all tensors + // N rows per thread, padded to HVX vector size + octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; + octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; + octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; + + size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; + 
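+    // src2 carries the optional RoPE frequency factors; pick the log format based on whether it is present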
+ if (src2->ne[0]) { + FARF(HIGH, + "%s: %ux%ux%ux%u (x %ux%ux%ux%u x %ux%ux%ux%u) -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u " + "dst-spad-size %u\n", + op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], + dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); + } else { + FARF(HIGH, + "%s: %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", + op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, + octx->dst_spad.size); + } + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, + spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; + + uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + uint32_t n_jobs = MIN(n_threads, src0_nrows); + octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + worker_pool_run_func(octx->ctx->worker_pool, op_func, &rope_ctx, n_jobs); + } + + return err; +} + +int op_rope(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + switch (octx->src0.type) { + case HTP_TYPE_F32: + err = execute_op_rope_f32(octx); + break; + + default: + err = HTP_STATUS_NO_SUPPORT; + break; + } + + return err; +} diff --git a/ggml/src/ggml-hexagon/htp/softmax-ops.c b/ggml/src/ggml-hexagon/htp/softmax-ops.c new file mode 100644 index 0000000000000..5bf0cbf7922bb --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/softmax-ops.c @@ -0,0 +1,402 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +#define htp_softmax_preamble3 \ + const uint32_t ne00 = src0->ne[0]; \ + const uint32_t ne01 = src0->ne[1]; \ + const uint32_t ne02 = src0->ne[2]; \ + const uint32_t ne03 = src0->ne[3]; \ + \ + const uint32_t nb00 = src0->nb[0]; \ + const uint32_t nb01 = src0->nb[1]; \ + const uint32_t nb02 = src0->nb[2]; \ + const uint32_t nb03 = src0->nb[3]; \ + \ + const uint32_t ne10 = (src1->ne[0]) ? src1->ne[0] : 1; \ + const uint32_t ne11 = (src1->ne[0]) ? src1->ne[1] : 1; \ + const uint32_t ne12 = (src1->ne[0]) ? src1->ne[2] : 1; \ + const uint32_t ne13 = (src1->ne[0]) ? src1->ne[3] : 1; \ + \ + const uint32_t nb10 = (src1->ne[0]) ? src1->nb[0] : 1; \ + const uint32_t nb11 = (src1->ne[0]) ? src1->nb[1] : 1; \ + const uint32_t nb12 = (src1->ne[0]) ? src1->nb[2] : 1; \ + const uint32_t nb13 = (src1->ne[0]) ? 
src1->nb[3] : 1; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +struct softmax_th_ctx { + bool use_f16; + bool use_src1; + uint32_t n_head; + uint32_t n_head_log2; + + float scale; + float max_bias; + float m0; + float m1; + + struct htp_ops_context * octx; +}; + +static void init_softmax_ctx(struct softmax_th_ctx * softmax_ctx, struct htp_ops_context * octx) { + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + + memset(softmax_ctx, 0, sizeof(struct softmax_th_ctx)); + + memcpy(&softmax_ctx->scale, (float *) octx->op_params, sizeof(float)); + memcpy(&softmax_ctx->max_bias, (float *) octx->op_params + 1, sizeof(float)); + + softmax_ctx->n_head = src0->ne[2]; + softmax_ctx->n_head_log2 = 1u << (uint32_t) floor(log2(softmax_ctx->n_head)); + + softmax_ctx->m0 = powf(2.0f, -(softmax_ctx->max_bias) / softmax_ctx->n_head_log2); + softmax_ctx->m1 = powf(2.0f, -(softmax_ctx->max_bias / 2.0f) / softmax_ctx->n_head_log2); + + softmax_ctx->use_src1 = (src1->ne[0] != 0); + softmax_ctx->use_f16 = (src1->ne[0] != 0) && (src1->type == HTP_TYPE_F16); + + softmax_ctx->octx = octx; +} + +static void hvx_fast_softmax_prep_f32(const uint8_t * restrict src, + uint8_t * restrict dst, + const int num_elems, + float scale, + const uint8_t * restrict mask, + float slope) { + const uint8_t * restrict src_curr = src; + uint8_t * restrict dst_curr = dst; + const uint8_t * restrict mask_curr = mask; + + HVX_Vector scale_vec = hvx_vec_splat_fp32(scale); + HVX_Vector slope_vec = hvx_vec_splat_fp32(slope); + + int step_of_1 = num_elems >> 5; + + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = *(HVX_Vector *) src_curr; + + HVX_Vector v3 = *(HVX_Vector *) mask_curr; + + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_vec); + + HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v3, slope_vec); + + HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(v2, v4); + + *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v5); + + src_curr += VLEN; + dst_curr += VLEN; + mask_curr += VLEN; + } +} + +static void hvx_fast_softmax_f32(const uint8_t * restrict src, + uint8_t * restrict dst, + uint8_t * restrict pad, + const int num_elems) { + const HVX_Vector * restrict v_src = (HVX_Vector *) src; + HVX_Vector * restrict v_pad = (HVX_Vector *) pad; + HVX_Vector * restrict v_dst = (HVX_Vector *) dst; + + HVX_Vector sum_vec = Q6_V_vsplat_R(0x00000000); + HVX_Vector max_vec = hvx_vec_splat_fp32(((const float *) src)[0]); + HVX_Vector zero_v = Q6_V_vzero(); + HVX_Vector one_v = hvx_vec_splat_fp32(1.0); + + int step_of_1 = num_elems >> 5; + + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = v_src[i]; + max_vec = Q6_Vsf_vmax_VsfVsf(max_vec, v1); + } + + HVX_Vector v = hvx_vec_reduce_max_fp32(max_vec); + max_vec = hvx_vec_repl4(v); + + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = v_src[i]; + HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v1, max_vec); + + HVX_Vector v3 = hvx_vec_exp_fp32(Q6_Vsf_equals_Vqf32(v2)); + + sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), v3); + + v_pad[i] = v3; + } + + v = hvx_vec_qf32_reduce_sum(sum_vec); + sum_vec = hvx_vec_repl4(Q6_Vsf_equals_Vqf32(v)); + + HVX_VectorPred pos_sum = Q6_Q_vcmp_gt_VwVw(sum_vec, zero_v); + HVX_Vector v4 = 
hvx_vec_inverse_fp32(sum_vec); + HVX_Vector scale_vec = Q6_V_vmux_QVV(pos_sum, v4, one_v); + + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = v_pad[i]; + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_vec); + v_dst[i] = Q6_Vsf_equals_Vqf32(v2); + } +} + +static float hvx_softmax_f32(const uint8_t * restrict src, + uint8_t * restrict dst, + uint8_t * restrict spad, + const int num_elems, + const float max) { + hvx_sub_scalar_f32(src, max, spad, num_elems); + + hvx_exp_f32(spad, dst, num_elems, false); + + float sum = hvx_self_sum_f32(dst, num_elems); + + return sum; +} + +static void softmax_htp_f32(int nth, int ith, struct softmax_th_ctx * softmax_ctx, int opt_path) { + struct htp_ops_context * octx = softmax_ctx->octx; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + const struct htp_tensor * dst = &octx->dst; + + htp_softmax_preamble3; + + uint8_t * src0_spad_data = octx->src0_spad.data + (ith * nb01); + uint8_t * src1_spad_data = octx->src1_spad.data + (ith * nb01); + uint8_t * dst_spad_data = octx->dst_spad.data + (ith * nb1); + + float * wp0 = (float *) src0_spad_data; + float * wp1 = (float *) src1_spad_data; + float * wp2 = (float *) dst_spad_data; + + for (uint32_t i03 = 0; i03 < ne03; i03++) { + for (uint32_t i02 = 0; i02 < ne02; i02++) { + for (uint32_t i01 = ith; i01 < ne01; i01 += nth) { + const uint32_t i11 = i01; + const uint32_t i12 = i02 % ne12; + const uint32_t i13 = i03 % ne13; + + // ALiBi + const uint32_t h = i02; // head + + const float slope = (softmax_ctx->max_bias > 0.0f) ? + h < softmax_ctx->n_head_log2 ? + powf(softmax_ctx->m0, h + 1) : + powf(softmax_ctx->m1, 2 * (h - softmax_ctx->n_head_log2) + 1) : + 1.0f; + + float * sp = (float *) ((char *) octx->src0.data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dp = (float *) ((char *) octx->dst.data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // broadcast the mask across rows + __fp16 * mp_f16 = (softmax_ctx->use_src1) ? + (__fp16 *) ((char *) octx->src1.data + i11 * nb11 + i12 * nb12 + i13 * nb13) : + NULL; + float * mp_f32 = (softmax_ctx->use_src1) ? + (float *) ((char *) octx->src1.data + i11 * nb11 + i12 * nb12 + i13 * nb13) : + NULL; + + if ((1 == opt_path) && (mp_f32) && !(softmax_ctx->use_f16)) { + hvx_fast_softmax_prep_f32((const uint8_t *) sp, (uint8_t *) wp0, ne00, softmax_ctx->scale, + (const uint8_t *) mp_f32, slope); + } else { + hvx_scale_f32((const uint8_t *) sp, (uint8_t *) wp0, ne00, softmax_ctx->scale); + if (mp_f32) { + if (softmax_ctx->use_f16) { + for (int i = 0; i < ne00; ++i) { + wp0[i] += slope * (float) mp_f16[i]; + } + } else { + for (int i = 0; i < ne00; ++i) { + wp0[i] += slope * mp_f32[i]; + } + } + } + } + + if (1 == opt_path) { + hvx_fast_softmax_f32((const uint8_t *) wp0, (uint8_t *) dp, (uint8_t *) wp1, ne00); + } else { + float max = hvx_self_max_f32((const uint8_t *) wp0, ne00); + float sum = hvx_softmax_f32((const uint8_t *) wp0, (uint8_t *) wp2, (uint8_t *) wp1, ne00, max); + sum = sum > 0.0 ? 
(1.0 / sum) : 1; + hvx_scale_f32((const uint8_t *) wp2, (uint8_t *) dp, ne00, sum); + } + } + } + } +} + +static void softmax_job_f32_per_thread(struct softmax_th_ctx * softmax_ctx, int nth, int ith) { + struct htp_ops_context * octx = softmax_ctx->octx; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + htp_softmax_preamble3; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + int is_aligned = 1; + int opt_path = 0; + if (!htp_is_aligned((void *) src0->data, VLEN) || !htp_is_aligned((void *) dst->data, VLEN)) { + is_aligned = 0; + FARF(HIGH, "softmax-f32: unaligned addresses in elementwise op, possibly slower execution\n"); + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + softmax_htp_f32(nth, ith, softmax_ctx, opt_path); + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "softmax-f32 %d/%d/%d/%d: %ux%ux%ux%u (%u:%u) x %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth, + softmax_ctx->use_f16, opt_path, ne00, ne01, ne02, ne03, src0_start_row, src0_end_row, ne10, ne11, ne12, ne13, + ne0, ne1, ne2, ne3, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void softmax_job_dispatcher_f32(unsigned int n, unsigned int i, void * p_data) { + struct softmax_th_ctx * p_softmax_ctx = (struct softmax_th_ctx *) p_data; + softmax_job_f32_per_thread(p_softmax_ctx, n, i); +} + +static int execute_op_softmax_f32(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + const struct htp_tensor * src0 = &octx->src0; + const struct htp_tensor * src1 = &octx->src1; + struct htp_tensor * dst = &octx->dst; + + worker_callback_t op_func; + const char * op_type = NULL; + + struct softmax_th_ctx softmax_ctx; + + switch (octx->op) { + case HTP_OP_SOFTMAX: + op_func = softmax_job_dispatcher_f32; + op_type = "softmax-f32"; + + init_softmax_ctx(&softmax_ctx, octx); + break; + + default: + FARF(ERROR, "Unsupported Op %u\n", octx->op); + return HTP_STATUS_NO_SUPPORT; + } + + const uint32_t n_threads = octx->n_threads; + + const size_t src0_row_size = src0->nb[1]; + const size_t src1_row_size = src0_row_size; + const size_t dst_row_size = dst->nb[1]; + + // VTCM scratchpads for all tensors + // N rows per thread, padded to HVX vector size + octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; + octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; + octx->src1_spad.size = htp_round_up(src1_row_size, 128) * n_threads; + + size_t spad_size = octx->src0_spad.size + octx->src1_spad.size + octx->dst_spad.size; + + if (src1->ne[0]) { + FARF(HIGH, + "%s: %ux%ux%ux%u x %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", + op_type, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], + src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], octx->src0_spad.size, octx->src1_spad.size, + octx->dst_spad.size); + } else { + FARF(HIGH, "%s: %ux%ux%ux%u -> %ux%ux%ux%u : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], 
dst->ne[3], + octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); + } + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, + spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size; + octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size; + + uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + uint32_t n_jobs = MIN(n_threads, src0_nrows); + octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + worker_pool_run_func(octx->ctx->worker_pool, op_func, &softmax_ctx, n_jobs); + } + + return err; +} + +int op_softmax(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + switch (octx->src0.type) { + case HTP_TYPE_F32: + err = execute_op_softmax_f32(octx); + break; + + default: + err = HTP_STATUS_NO_SUPPORT; + break; + } + + return err; +} diff --git a/ggml/src/ggml-hexagon/htp/unary-ops.c b/ggml/src/ggml-hexagon/htp/unary-ops.c new file mode 100644 index 0000000000000..bb7557b025267 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/unary-ops.c @@ -0,0 +1,255 @@ +#pragma clang diagnostic ignored "-Wunused-variable" +#pragma clang diagnostic ignored "-Wunused-function" +#pragma clang diagnostic ignored "-Wunused-but-set-variable" + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" +#include "htp-ctx.h" +#include "htp-dma.h" +#include "htp-msg.h" +#include "htp-ops.h" +#include "hvx-utils.h" +#include "ops-utils.h" + +#define htp_unary_preamble \ + const uint32_t ne00 = src->ne[0]; \ + const uint32_t ne01 = src->ne[1]; \ + const uint32_t ne02 = src->ne[2]; \ + const uint32_t ne03 = src->ne[3]; \ + \ + const uint32_t ne0 = dst->ne[0]; \ + const uint32_t ne1 = dst->ne[1]; \ + const uint32_t ne2 = dst->ne[2]; \ + const uint32_t ne3 = dst->ne[3]; \ + \ + const uint32_t nb00 = src->nb[0]; \ + const uint32_t nb01 = src->nb[1]; \ + const uint32_t nb02 = src->nb[2]; \ + const uint32_t nb03 = src->nb[3]; \ + \ + const uint32_t nb0 = dst->nb[0]; \ + const uint32_t nb1 = dst->nb[1]; \ + const uint32_t nb2 = dst->nb[2]; \ + const uint32_t nb3 = dst->nb[3]; + +static void hvx_fast_rms_norm_f32(const uint8_t * restrict src, + uint8_t * restrict dst, + uint8_t * restrict pad, + const int num_elems, + float epsilon) { + const HVX_Vector * restrict v_src = (HVX_Vector *) src; + HVX_Vector * restrict v_dst = (HVX_Vector *) dst; + + HVX_Vector sum_v = Q6_V_vsplat_R(0x00000000); + HVX_Vector epsilon_v = hvx_vec_splat_fp32(epsilon); + + int step_of_1 = num_elems >> 5; + #pragma unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = v_src[i]; + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, v1); + sum_v = Q6_Vqf32_vadd_Vqf32Vqf32(sum_v, v2); + } + + HVX_Vector reduced_sum = hvx_vec_qf32_reduce_sum(sum_v); + sum_v = hvx_vec_repl4(Q6_Vsf_equals_Vqf32(reduced_sum)); + + HVX_Vector t_v = hvx_vec_splat_fp32((float) num_elems); + HVX_Vector denom_v = hvx_vec_inverse_fp32(t_v); + HVX_Vector mean_v = Q6_Vqf32_vmpy_VsfVsf(sum_v, denom_v); + HVX_Vector mean_epsilon_v = Q6_Vqf32_vadd_Vqf32Vsf(mean_v, epsilon_v); + + HVX_Vector scale_v = hvx_vec_rsqrt_fp32(Q6_Vsf_equals_Vqf32(mean_epsilon_v)); + + #pragma 
unroll(4) + for (int i = 0; i < step_of_1; i++) { + HVX_Vector v1 = v_src[i]; + HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v1, scale_v); + v_dst[i] = Q6_Vsf_equals_Vqf32(v2); + } +} + +static void rms_norm_htp_f32(const float * restrict src, + float * restrict dst, + uint8_t * restrict spad, + const uint32_t num_rows, + const uint32_t row_elems, + const size_t row_size, + int32_t * op_params, + int opt_path) { + float epsilon = 0.f; + memcpy(&epsilon, op_params, sizeof(float)); + + for (uint32_t ir = 0; ir < num_rows; ir++) { + const float * restrict src_local = src + (ir * row_elems); + float * restrict dst_local = dst + (ir * row_elems); + + if (ir + 1 < num_rows) { + htp_l2fetch(src_local + row_elems, 1, row_size, row_size); + } + + if (1 == opt_path) { + hvx_fast_rms_norm_f32((const uint8_t *) src_local, (uint8_t *) dst_local, spad, row_elems, epsilon); + } else { + float sum = hvx_sum_of_squares_f32((const uint8_t *) src_local, row_elems); + + const float mean = sum / row_elems; + const float scale = 1.0f / sqrtf(mean + epsilon); + + hvx_scale_f32((const uint8_t *) src_local, (uint8_t *) dst_local, row_elems, scale); + } + } +} + +static void unary_job_f32_per_thread(const struct htp_tensor * src, + struct htp_tensor * dst, + uint8_t * spad, + int htp_op, + int32_t * op_params, + uint32_t nth, + uint32_t ith, + uint32_t src0_nrows_per_thread) { + htp_unary_preamble; + + const size_t src0_row_size = nb01; + const size_t dst_row_size = nb1; + + const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows + + const uint32_t src0_start_row = src0_nrows_per_thread * ith; + const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows); + + // no work for this thread + if (src0_start_row >= src0_end_row) { + return; + } + + uint64_t t1, t2; + t1 = HAP_perf_get_qtimer_count(); + + int is_aligned = 1; + int opt_path = 0; + if ((0 == htp_is_aligned((void *) src->data, VLEN)) || (0 == htp_is_aligned((void *) dst->data, VLEN))) { + is_aligned = 0; + FARF(HIGH, "unary-f32: unaligned addresses in unary op, possibly slower execution\n"); + } + if ((1 == is_aligned) && !(nb01 & (VLEN - 1))) { + opt_path = 1; + } + + const uint8_t * restrict data_src = (const uint8_t *) src->data; + uint8_t * restrict data_dst = (uint8_t *) dst->data; + + const float * restrict src_th = (float *) (data_src + (src0_start_row * src0_row_size)); + float * restrict dst_th = (float *) (data_dst + (src0_start_row * dst_row_size)); + uint8_t * restrict spad_th = (uint8_t *) spad + (ith * nb01); + + switch (htp_op) { + case HTP_OP_RMS_NORM: + rms_norm_htp_f32(src_th, dst_th, spad_th, src0_end_row - src0_start_row, ne0, nb1, op_params, opt_path); + break; + + default: + break; + } + + t2 = HAP_perf_get_qtimer_count(); + + FARF(HIGH, "unary-f32 %d/%d/%d: %ux%ux%ux%u (%u:%u) -> %ux%ux%ux%u usec %u\n", ith, nth, opt_path, src->ne[0], + src->ne[1], src->ne[2], src->ne[3], src0_start_row, src0_end_row, dst->ne[0], dst->ne[1], dst->ne[2], + dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); +} + +static void unary_job_dispatcher_f32(unsigned int n, unsigned int i, void * data) { + struct htp_ops_context * octx = (struct htp_ops_context *) data; + + unary_job_f32_per_thread(&octx->src0, &octx->dst, octx->src0_spad.data, octx->op, octx->op_params, n, i, + octx->src0_nrows_per_thread); +} + +static int execute_op_unary_f32(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + const struct htp_tensor * src0 = &octx->src0; + struct htp_tensor * dst = &octx->dst; + + worker_callback_t unary_op_func; 
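For reference, the HVX fast path and the scalar fallback in rms_norm_htp_f32 above compute the same per-row transform: mean of squares plus epsilon, reciprocal square root, then a scale applied to every element. A plain-C sketch of that row transform (the function name is ours, not part of the backend):

    #include <math.h>
    #include <stddef.h>

    // Reference row-wise RMS norm, matching the scalar fallback above:
    // scale = 1 / sqrt(mean(x^2) + eps), dst[i] = src[i] * scale.
    static void rms_norm_row_ref(const float * src, float * dst, size_t n, float eps) {
        float sum = 0.0f;
        for (size_t i = 0; i < n; i++) {
            sum += src[i] * src[i];           // sum of squares
        }
        const float mean  = sum / (float) n;  // mean of squares
        const float scale = 1.0f / sqrtf(mean + eps);
        for (size_t i = 0; i < n; i++) {
            dst[i] = src[i] * scale;
        }
    }
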
+ const char * op_type = NULL; + + switch (octx->op) { + case HTP_OP_RMS_NORM: + unary_op_func = unary_job_dispatcher_f32; + op_type = "rmsnorm-f32"; + break; + + default: + FARF(ERROR, "Unsupported unary Op %u\n", octx->op); + return HTP_STATUS_NO_SUPPORT; + } + + const int n_threads = octx->n_threads; + const uint32_t src0_nrows = src0->ne[1] * src0->ne[2] * src0->ne[3]; + + const size_t src0_row_size = src0->nb[1]; + const size_t dst_row_size = dst->nb[1]; + + // VTCM scratchpads for all tensors + octx->dst_spad.size = htp_round_up(dst_row_size, 128) * n_threads; + octx->src0_spad.size = htp_round_up(src0_row_size, 128) * n_threads; + + size_t spad_size = octx->src0_spad.size + octx->dst_spad.size; + + FARF(HIGH, "%s: (%ux%ux%ux%u) -> (%ux%ux%ux%u) : src0-spad-size %u src1-spad-size %u dst-spad-size %u\n", op_type, + src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size); + + // Make sure the reserved vtcm size is sufficient + if (octx->ctx->vtcm_size < spad_size) { + FARF(ERROR, "unary-%s : current VTCM reservation %zu is too small, needed %zu\n", op_type, octx->ctx->vtcm_size, + spad_size); + return HTP_STATUS_VTCM_TOO_SMALL; + } + + octx->src0_spad.data = octx->ctx->vtcm_base; + octx->dst_spad.data = octx->src0_spad.data + octx->src0_spad.size; + + if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) { + uint32_t n_jobs = MIN(n_threads, src0_nrows); + + octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + + worker_pool_run_func(octx->ctx->worker_pool, unary_op_func, octx, n_jobs); + } + + return err; +} + +int op_unary(struct htp_ops_context * octx) { + int err = HTP_STATUS_OK; + + switch (octx->src0.type) { + case HTP_TYPE_F32: + err = execute_op_unary_f32(octx); + break; + + default: + err = HTP_STATUS_NO_SUPPORT; + break; + } + + return err; +} diff --git a/ggml/src/ggml-hexagon/htp/worker-pool.c b/ggml/src/ggml-hexagon/htp/worker-pool.c new file mode 100644 index 0000000000000..cd38c2126c7b2 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/worker-pool.c @@ -0,0 +1,297 @@ +#include "worker-pool.h" + +#include +#include +#include +#include +#include +#include + +#ifdef HTP_DEBUG +# define FARF_HIGH 1 +#endif + +#include "HAP_farf.h" + +#define WORKER_THREAD_STACK_SZ (2 * 16384) +#define LOWEST_USABLE_QURT_PRIO (254) + +struct worker_pool_s; + +// internal structure kept in thread-local storage per instance of worker pool +typedef struct { + struct worker_pool_s * pool; + unsigned int id; +} worker_context_t; + +// internal structure kept in thread-local storage per instance of worker pool +typedef struct worker_pool_s { + worker_pool_job_t job[MAX_NUM_WORKERS]; // list of job descriptors + qurt_thread_t thread[MAX_NUM_WORKERS]; // thread ID's of the workers + worker_context_t context[MAX_NUM_WORKERS]; // worker contexts + void * stack[MAX_NUM_WORKERS]; // thread stack pointers + unsigned int n_threads; // number of workers in this pool + + atomic_uint seqn; // seqno used to detect new jobs + atomic_uint next_job; // next job index + atomic_uint n_pending; // number of pending jobs + atomic_uint n_jobs; // number of current jobs + atomic_bool killed; // threads need to exit +} worker_pool_t; + +static void worker_pool_main(void * context) { + worker_context_t * me = (worker_context_t *) context; + worker_pool_t * pool = me->pool; + + FARF(HIGH, "worker-pool: thread %u started", me->id); + + unsigned int prev_seqn = 0; + while (!atomic_load(&pool->killed)) { + 
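The unary op setup above reserves one 128-byte-rounded row of VTCM scratch per thread and splits the flattened row range into at most n_threads ceil-divided jobs. A standalone sketch of that arithmetic (all names and values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static inline size_t round_up(size_t n, size_t m) { return (n + m - 1) / m * m; }

    int main(void) {
        const uint32_t n_threads = 4;
        const uint32_t nrows     = 10;   // ne01 * ne02 * ne03
        const size_t   row_size  = 4096; // bytes per row (nb[1])

        // one padded row of scratch per thread, as in the spad sizing above
        const size_t spad_size = round_up(row_size, 128) * n_threads;

        // ceil-divide rows over jobs; trailing threads may end up with no work
        const uint32_t n_jobs           = MIN(n_threads, nrows);
        const uint32_t nrows_per_thread = (nrows + n_jobs - 1) / n_jobs;

        for (uint32_t ith = 0; ith < n_jobs; ith++) {
            const uint32_t start = nrows_per_thread * ith;
            const uint32_t end   = MIN(start + nrows_per_thread, nrows);
            printf("thread %u: rows [%u, %u) spad %zu bytes\n",
                   (unsigned) ith, (unsigned) start, (unsigned) end, spad_size);
        }
        return 0;
    }
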
unsigned int seqn = atomic_load(&pool->seqn); + if (seqn == prev_seqn) { + // Nothing to do + qurt_futex_wait(&pool->seqn, prev_seqn); + continue; + } + + // New job + prev_seqn = seqn; + + unsigned int n = atomic_load(&pool->n_jobs); + unsigned int i = atomic_fetch_add(&pool->next_job, 1); + if (i >= n) { + // Spurios wakeup + continue; + } + + pool->job[i].func(n, i, pool->job[i].data); + + atomic_fetch_sub(&pool->n_pending, 1); + } + + FARF(HIGH, "worker-pool: thread %u stopped", me->id); +} + +AEEResult worker_pool_init_with_stack_size(worker_pool_context_t * context, uint32_t n_threads, uint32_t stack_size) { + int err = 0; + + if (NULL == context) { + FARF(ERROR, "NULL context passed to worker_pool_init()."); + return AEE_EBADPARM; + } + + // Allocations + int size = (stack_size * n_threads) + (sizeof(worker_pool_t)); + + unsigned char * mem_blob = (unsigned char *) malloc(size); + if (!mem_blob) { + FARF(ERROR, "Could not allocate memory for worker pool!!"); + return AEE_ENOMEMORY; + } + + worker_pool_t * me = (worker_pool_t *) (mem_blob + stack_size * n_threads); + + // name for the first worker, useful in debugging threads + char name[19]; + snprintf(name, 12, "0x%8x:", (int) me); + strcat(name, "worker0"); + me->n_threads = n_threads; + + // initializations + for (unsigned int i = 0; i < me->n_threads; i++) { + me->stack[i] = NULL; + me->thread[i] = 0; + + me->context[i].id = i; + me->context[i].pool = me; + } + + // initialize job queue + me->n_pending = 0; + me->n_jobs = 0; + me->next_job = 0; + me->seqn = 0; + me->killed = 0; + + // launch the workers + qurt_thread_attr_t attr; + qurt_thread_attr_init(&attr); + + for (unsigned int i = 0; i < me->n_threads; i++) { + // set up stack + me->stack[i] = mem_blob; + mem_blob += stack_size; + qurt_thread_attr_set_stack_addr(&attr, me->stack[i]); + qurt_thread_attr_set_stack_size(&attr, stack_size); + + // set up name + qurt_thread_attr_set_name(&attr, name); + name[17] = (name[17] + 1); + // name threads context:worker0, context:worker1, .. (recycle at 9, but num threads should be less than that anyway) + if (name[17] > '9') { + name[17] = '0'; + } + + // set up priority - by default, match the creating thread's prio + int prio = qurt_thread_get_priority(qurt_thread_get_id()); + + if (prio < 1) { + prio = 1; + } + if (prio > LOWEST_USABLE_QURT_PRIO) { + prio = LOWEST_USABLE_QURT_PRIO; + } + + qurt_thread_attr_set_priority(&attr, prio); + + // launch + err = qurt_thread_create(&me->thread[i], &attr, worker_pool_main, (void *) &me->context[i]); + if (err) { + FARF(ERROR, "Could not launch worker threads!"); + worker_pool_release((worker_pool_context_t *) &me); + return AEE_EQURTTHREADCREATE; + } + } + *context = (worker_pool_context_t *) me; + return AEE_SUCCESS; +} + +AEEResult worker_pool_init(worker_pool_context_t * context, uint32_t n_threads) { + return worker_pool_init_with_stack_size(context, n_threads, WORKER_THREAD_STACK_SZ); +} + +// clean up worker pool +void worker_pool_release(worker_pool_context_t * context) { + worker_pool_t * me = (worker_pool_t *) *context; + + // if no worker pool exists, return error. 
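worker_pool_main above claims work by watching a sequence number and atomically taking the next job index; an index at or past n_jobs is treated as a spurious wakeup. The QuRT futex calls are platform specific, so the sketch below shows only the atomic hand-off using C11 atomics (illustrative, not the backend code; a real worker blocks on the futex instead of returning early):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
        atomic_uint seqn;      // bumped by the submitter to signal a new batch
        atomic_uint next_job;  // next job index to claim
        atomic_uint n_jobs;    // number of jobs in the current batch
        atomic_uint n_pending; // jobs not yet finished
        atomic_bool killed;    // set when the pool is shutting down
    } pool_state_t;

    // One worker iteration: returns the claimed job index, or -1 if there is
    // nothing new or the batch has already been fully claimed.
    static int claim_job(pool_state_t * st, unsigned int * prev_seqn) {
        unsigned int seqn = atomic_load(&st->seqn);
        if (seqn == *prev_seqn) {
            return -1;                  // no new batch; real code waits on the futex here
        }
        *prev_seqn = seqn;

        unsigned int n = atomic_load(&st->n_jobs);
        unsigned int i = atomic_fetch_add(&st->next_job, 1);
        if (i >= n) {
            return -1;                  // spurious wakeup: all jobs already claimed
        }
        return (int) i;                 // caller runs job i, then decrements n_pending
    }
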
+ if (NULL == me) { + return; + } + + atomic_store(&me->killed, 1); + atomic_fetch_add(&me->seqn, 1); + qurt_futex_wake(&me->seqn, me->n_threads); + + // de-initializations + for (unsigned int i = 0; i < me->n_threads; i++) { + if (me->thread[i]) { + int status; + (void) qurt_thread_join(me->thread[i], &status); + } + } + + // free allocated memory (were allocated as a single buffer starting at stack[0]) + if (me->stack[0]) { + free(me->stack[0]); + } + + *context = NULL; +} + +// run jobs +AEEResult worker_pool_run_jobs(worker_pool_context_t context, worker_pool_job_t * job, unsigned int n) { + worker_pool_t * me = (worker_pool_t *) context; + if (NULL == me) { + FARF(ERROR, "worker-pool: invalid context"); + return AEE_EBADPARM; + } + + if (n > me->n_threads) { + FARF(ERROR, "worker-pool: invalid number of jobs %u for n-threads %u", n, me->n_threads); + return AEE_EBADPARM; + } + + memcpy(me->job, job, sizeof(worker_pool_job_t) * n); + + if (n > 1) { + atomic_store(&me->next_job, 1); + atomic_store(&me->n_jobs, n); + atomic_store(&me->n_pending, n - 1); + + // wake up workers + atomic_fetch_add(&me->seqn, 1); + qurt_futex_wake(&me->seqn, n - 1); + } + + // main thread runs job #0 + me->job[0].func(n, 0, me->job[0].data); + + if (n > 1) { + while (atomic_load(&me->n_pending)) + ; + } + + return 0; +} + +// run func +AEEResult worker_pool_run_func(worker_pool_context_t context, worker_callback_t func, void * data, unsigned int n) { + worker_pool_job_t job[n]; + + for (unsigned int i = 0; i < n; i++) { + job[i].func = func; + job[i].data = data; + } + + return worker_pool_run_jobs(context, job, n); +} + +AEEResult worker_pool_set_thread_priority(worker_pool_context_t context, unsigned int prio) { + worker_pool_t * me = (worker_pool_t *) context; + + // if no worker pool exists, return error. + if (!me) { + return AEE_ENOMORE; + } + + int result = AEE_SUCCESS; + if (prio < 1) { + prio = 1; + } + if (prio > LOWEST_USABLE_QURT_PRIO) { + prio = LOWEST_USABLE_QURT_PRIO; + } + + for (unsigned int i = 0; i < me->n_threads; i++) { + int res = qurt_thread_set_priority(me->thread[i], (unsigned short) prio); + if (0 != res) { + result = AEE_EBADPARM; + FARF(ERROR, "QURT failed to set priority of thread %d, ERROR = %d", me->thread[i], res); + } + } + + return result; +} + +AEEResult worker_pool_retrieve_thread_id(worker_pool_context_t context, unsigned int * tids) { + worker_pool_t * me = (worker_pool_t *) context; + if (!me) { + FARF(ERROR, "worker-pool: invalid context"); + return AEE_EBADPARM; + ; + } + + for (int i = 0; i < me->n_threads; i++) { + tids[i] = me->thread[i]; + } + + return AEE_SUCCESS; +} + +AEEResult worker_pool_get_thread_priority(worker_pool_context_t context, unsigned int * prio) { + worker_pool_t * me = (worker_pool_t *) context; + if (!me) { + FARF(ERROR, "worker-pool: invalid context"); + return AEE_EBADPARM; + } + + int priority = qurt_thread_get_priority(me->thread[0]); + if (priority > 0) { + *prio = priority; + return 0; + } else { + *prio = 0; + return AEE_EBADSTATE; + } +} diff --git a/ggml/src/ggml-hexagon/htp/worker-pool.h b/ggml/src/ggml-hexagon/htp/worker-pool.h new file mode 100644 index 0000000000000..6f8c9056c4b49 --- /dev/null +++ b/ggml/src/ggml-hexagon/htp/worker-pool.h @@ -0,0 +1,57 @@ +#ifndef HTP_WORKER_POOL_H +#define HTP_WORKER_POOL_H + +// MACRO enables function to be visible in shared-library case. 
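The header added next declares the public worker-pool API. A minimal usage sketch, assuming the Hexagon SDK headers pulled in by worker-pool.h provide AEEResult and AEE_SUCCESS; the callback derives its slice of work from the (n, i) pair, just like the op dispatchers above:

    #include <stdint.h>

    #include "worker-pool.h"

    // Each invocation receives the total job count n and its own index i.
    static void my_job(unsigned int n, unsigned int i, void * data) {
        float * buf = (float *) data;
        // ... process the i-th of n slices of buf ...
        (void) buf; (void) n; (void) i;
    }

    static int run_example(float * buf, uint32_t n_threads) {
        worker_pool_context_t pool = NULL;
        if (worker_pool_init(&pool, n_threads) != AEE_SUCCESS) {
            return -1;
        }
        // the calling thread runs job 0 itself, workers pick up the rest, and
        // worker_pool_run_func returns only after all jobs have completed
        worker_pool_run_func(pool, my_job, buf, n_threads);
        worker_pool_release(&pool);
        return 0;
    }
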
+#define WORKERPOOL_API __attribute__((visibility("default"))) + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// signature of callbacks to be invoked by worker threads +typedef void (*worker_callback_t)(unsigned int n, unsigned int i, void *); + +/// Typedef of worker_pool context +typedef void * worker_pool_context_t; + +/// descriptor for requested callback +typedef struct { + worker_callback_t func; + void * data; +} worker_pool_job_t; + +/// Maximum supported number of worker threads. +#define MAX_NUM_WORKERS 10 + +// Initialize worker pool. +WORKERPOOL_API AEEResult worker_pool_init(worker_pool_context_t * context, uint32_t n_threads); + +// Initialize worker pool with custom stack size +WORKERPOOL_API AEEResult worker_pool_init_with_stack_size(worker_pool_context_t * context, + uint32_t n_threads, + uint32_t stack_size); + +// Kill worker threads and release worker pool resources +WORKERPOOL_API void worker_pool_release(worker_pool_context_t * context); + +// Run jobs with the worker pool. +WORKERPOOL_API AEEResult worker_pool_run_jobs(worker_pool_context_t context, worker_pool_job_t * job, unsigned int n); + +WORKERPOOL_API AEEResult worker_pool_run_func(worker_pool_context_t context, + worker_callback_t func, + void * data, + unsigned int n); + +WORKERPOOL_API AEEResult worker_pool_set_thread_priority(worker_pool_context_t context, unsigned int prio); +WORKERPOOL_API AEEResult worker_pool_get_thread_priority(worker_pool_context_t context, unsigned int * prio); +WORKERPOOL_API AEEResult worker_pool_retrieve_thread_id(worker_pool_context_t context, unsigned int * tids); + +#ifdef __cplusplus +} +#endif + +#endif // #ifndef HTP_WORKER_POOL_H diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index 934aefdcb45fa..6b499320e7b12 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -28,8 +28,10 @@ if (CXX_IS_HIPCC) " Prefer setting the HIP compiler directly. See README for details.") endif() else() - # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES. - if (AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) + # Forward (AMD)GPU_TARGETS to CMAKE_HIP_ARCHITECTURES. + if(GPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) + set(CMAKE_HIP_ARCHITECTURES ${GPU_TARGETS}) + elseif(AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS}) endif() cmake_minimum_required(VERSION 3.21) diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index d0fb3bccad225..e9201cdc685dc 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -565,14 +565,23 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) +static inline int32_t ggml_node_get_use_count(const struct ggml_cgraph * cgraph, int node_idx) { + const struct ggml_tensor * node = cgraph->nodes[node_idx]; + + size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); + if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos)) { + return 0; + } + return cgraph->use_counts[hash_pos]; +} + // return true if the node's results are only used by N other nodes // and can be fused into their calculations. 
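ggml_node_get_use_count and the subgraph-fusion helpers introduced here let a backend check that intermediate nodes are consumed only inside a candidate subgraph. A hedged sketch of a call site using the array-based entry point; the op sequence is only an example, and the output list is assumed here to hold absolute node indices:

    #include <stdbool.h>

    #include "ggml-impl.h"

    // Ask whether nodes [i, i+2] form a MUL_MAT -> ADD -> UNARY chain whose
    // intermediates are used nowhere else, so only the last node must be kept.
    static bool can_fuse_mul_mat_add_unary(const struct ggml_cgraph * cgraph, int i) {
        const enum ggml_op ops[3] = { GGML_OP_MUL_MAT, GGML_OP_ADD, GGML_OP_UNARY };
        const int outputs[1]      = { i + 2 };  // only the final node escapes the subgraph
        return ggml_can_fuse_subgraph(cgraph, i, 3, ops, outputs, 1);
    }
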
static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) { const struct ggml_tensor * node = cgraph->nodes[node_idx]; // check the use count against how many we're replacing - size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); - if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos) || cgraph->use_counts[hash_pos] != n_uses) { + if (ggml_node_get_use_count(cgraph, node_idx) != n_uses) { return false; } @@ -638,6 +647,36 @@ static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx return ggml_can_fuse_ext(cgraph, idxs, ops, num_ops); } +GGML_API bool ggml_can_fuse_subgraph_ext(const struct ggml_cgraph * cgraph, + const int * node_idxs, + int count, + const enum ggml_op * ops, + const int * outputs, + int num_outputs); + +// Returns true if the subgraph formed by {node_idxs} can be fused +// checks whethers all nodes which are not part of outputs can be elided +// by checking if their num_uses are confined to the subgraph +static inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph, + int node_idx, + int count, + const enum ggml_op * ops, + const int * outputs, + int num_outputs) { + GGML_ASSERT(count < 32); + if (node_idx + count > cgraph->n_nodes) { + return false; + } + + int idxs[32]; + + for (int i = 0; i < count; ++i) { + idxs[i] = node_idx + i; + } + + return ggml_can_fuse_subgraph_ext(cgraph, idxs, count, ops, outputs, num_outputs); +} + #ifdef __cplusplus } #endif @@ -651,6 +690,13 @@ inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std:: return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size()); } +inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph, + int start_idx, + std::initializer_list ops, + std::initializer_list outputs = {}) { + return ggml_can_fuse_subgraph(cgraph, start_idx, ops.size(), ops.begin(), outputs.begin(), outputs.size()); +} + // expose GGUF internals for test code GGML_API size_t gguf_type_size(enum gguf_type type); GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params); diff --git a/ggml/src/ggml-metal/ggml-metal-device.cpp b/ggml/src/ggml-metal/ggml-metal-device.cpp index 866cd2da58576..75811634227b3 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.cpp +++ b/ggml/src/ggml-metal/ggml-metal-device.cpp @@ -1406,6 +1406,31 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_1d(ggml_met return res; } +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_2d(ggml_metal_library_t lib, const ggml_tensor * op) { + assert(op->op == GGML_OP_CONV_TRANSPOSE_2D); + + GGML_ASSERT(ggml_is_contiguous(op->src[0])); + GGML_ASSERT(ggml_is_contiguous(op->src[1])); + GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); + GGML_ASSERT(op->type == GGML_TYPE_F32); + + char base[256]; + char name[256]; + + snprintf(base, 256, "kernel_conv_transpose_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type)); + snprintf(name, 256, "%s", base); + + ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name); + if (res) { + return res; + } + + res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); + + return res; +} + ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_UPSCALE); diff --git a/ggml/src/ggml-metal/ggml-metal-device.h 
b/ggml/src/ggml-metal/ggml-metal-device.h index 28ae2e1765146..4d58297481813 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.h +++ b/ggml/src/ggml-metal/ggml-metal-device.h @@ -130,6 +130,7 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_norm (ggml_me ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rope (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_im2col (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_2d (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad_reflect_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index c3c83abe4e63e..360fbe19f0fb6 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -653,6 +653,11 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te case GGML_OP_SCALE: case GGML_OP_CONV_TRANSPOSE_1D: return true; + case GGML_OP_CONV_TRANSPOSE_2D: + return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]) && + (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32) && + op->src[1]->type == GGML_TYPE_F32 && + op->type == GGML_TYPE_F32; case GGML_OP_CLAMP: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SQR: diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index fa2d82cefb40e..96f43d260a3c3 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -514,6 +514,19 @@ typedef struct { uint64_t nb1; } ggml_metal_kargs_conv_transpose_1d; +typedef struct { + int32_t IC; + int32_t IH; + int32_t IW; + int32_t KH; + int32_t KW; + int32_t OC; + int32_t s0; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; +} ggml_metal_kargs_conv_transpose_2d; + typedef struct { uint64_t ofs0; uint64_t ofs1; diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index 4f9f6bda00a79..7a85edbdcdb84 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -368,6 +368,10 @@ static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) { { n_fuse = ggml_metal_op_conv_transpose_1d(ctx, idx); } break; + case GGML_OP_CONV_TRANSPOSE_2D: + { + n_fuse = ggml_metal_op_conv_transpose_2d(ctx, idx); + } break; case GGML_OP_UPSCALE: { n_fuse = ggml_metal_op_upscale(ctx, idx); @@ -3118,6 +3122,62 @@ int ggml_metal_op_conv_transpose_1d(ggml_metal_op_t ctx, int idx) { return 1; } +int ggml_metal_op_conv_transpose_2d(ggml_metal_op_t ctx, int idx) { + ggml_tensor * op = ctx->node(idx); + + ggml_metal_library_t lib = ctx->lib; + ggml_metal_encoder_t enc = ctx->enc; + + GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); + GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); + GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); + GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); + GGML_TENSOR_LOCALS( int32_t, ne, op, ne); + GGML_TENSOR_LOCALS(uint32_t, 
nb, op, nb); + + const int32_t s0 = ((const int32_t *)(op->op_params))[0]; + + const int32_t IC = op->src[1]->ne[2]; + const int32_t IH = op->src[1]->ne[1]; + const int32_t IW = op->src[1]->ne[0]; + + const int32_t KH = op->src[0]->ne[1]; + const int32_t KW = op->src[0]->ne[0]; + + const int32_t OW = op->ne[0]; + const int32_t OH = op->ne[1]; + const int32_t OC = op->ne[2]; + + ggml_metal_kargs_conv_transpose_2d args = { + /*.IC =*/ IC, + /*.IH =*/ IH, + /*.IW =*/ IW, + /*.KH =*/ KH, + /*.KW =*/ KW, + /*.OC =*/ OC, + /*.s0 =*/ s0, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + }; + + ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_conv_transpose_2d(lib, op); + + ggml_metal_encoder_set_pipeline(enc, pipeline); + ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); + + // Metal requires buffer size to be multiple of 16 bytes + const size_t smem = GGML_PAD(KW * KH * sizeof(float), 16); + ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); + + ggml_metal_encoder_dispatch_threadgroups(enc, OW, OH, OC, KW, KH, 1); + + return 1; +} + int ggml_metal_op_upscale(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); diff --git a/ggml/src/ggml-metal/ggml-metal-ops.h b/ggml/src/ggml-metal/ggml-metal-ops.h index f352738698beb..0d9cb8af7c1d0 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.h +++ b/ggml/src/ggml-metal/ggml-metal-ops.h @@ -71,6 +71,7 @@ int ggml_metal_op_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_rope (ggml_metal_op_t ctx, int idx); int ggml_metal_op_im2col (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx); +int ggml_metal_op_conv_transpose_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_upscale (ggml_metal_op_t ctx, int idx); int ggml_metal_op_pad (ggml_metal_op_t ctx, int idx); int ggml_metal_op_pad_reflect_1d (ggml_metal_op_t ctx, int idx); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 496610b154b6d..2c2f0141514ca 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -4179,6 +4179,97 @@ kernel void kernel_conv_transpose_1d( uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]]); + +typedef void (conv_transpose_2d_t)( + constant ggml_metal_kargs_conv_transpose_2d & args, + device const float * src0, + device const float * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tgpg[[threadgroups_per_grid]]); + +template +kernel void kernel_conv_transpose_2d( + constant ggml_metal_kargs_conv_transpose_2d & args, + device const T * src0, + device const float * src1, + device char * dst, + threadgroup float * shared_sum [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]) { + + const int64_t out_x = tgpig[0]; + const int64_t out_y = tgpig[1]; + const int64_t out_c = tgpig[2]; + + const int64_t kw = tpitg[0]; + const int64_t kh = tpitg[1]; + + float v = 0.0f; + + for (int64_t in_c = 0; in_c < args.IC; in_c++) { + int64_t in_y = out_y - kh; + + if (in_y < 0 || in_y % args.s0) continue; + + in_y /= args.s0; + + if (in_y >= args.IH) continue; + + int64_t in_x = out_x - kw; + + if (in_x < 0 || in_x % 
args.s0) continue; + + in_x /= args.s0; + + if (in_x >= args.IW) continue; + + const int64_t input_idx = (args.IW * args.IH) * in_c + (args.IW) * in_y + in_x; + const int64_t kernel_idx = (args.KH * args.KW * args.OC) * in_c + (args.KH * args.KW) * out_c + (args.KW) * kh + kw; + + v += (float)src0[kernel_idx] * src1[input_idx]; + } + + const uint tid = tpitg.y * ntg.x + tpitg.x; + shared_sum[tid] = v; + + threadgroup_barrier(mem_flags::mem_threadgroup); + + if (tid == 0) { + float total = 0.0f; + const uint num_threads = ntg.x * ntg.y; + for (uint i = 0; i < num_threads; i++) { + total += shared_sum[i]; + } + + device float * dst_ptr = (device float *) (dst + out_x*args.nb0 + out_y * args.nb1 + out_c*args.nb2); + dst_ptr[0] = total; + } +} + +template [[host_name("kernel_conv_transpose_2d_f32_f32")]] +kernel void kernel_conv_transpose_2d( + constant ggml_metal_kargs_conv_transpose_2d & args, + device const float * src0, + device const float * src1, + device char * dst, + threadgroup float * shared_sum [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]); + +template [[host_name("kernel_conv_transpose_2d_f16_f32")]] +kernel void kernel_conv_transpose_2d( + constant ggml_metal_kargs_conv_transpose_2d & args, + device const half * src0, + device const float * src1, + device char * dst, + threadgroup float * shared_sum [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]); + kernel void kernel_upscale_f32( constant ggml_metal_kargs_upscale & args, device const char * src0, diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index 6f6bba55e2805..d3d97f375e8f3 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -91,6 +91,8 @@ set(GGML_OPENCL_KERNELS mul_mv_id_q8_0_f32_flat mul_mv_id_mxfp4_f32 mul_mv_id_mxfp4_f32_flat + gemm_moe_mxfp4_f32 + gemv_moe_mxfp4_f32 mul_mm_f32_f32_l4_lm mul_mm_f16_f32_l4_lm mul_mm_q8_0_f32_l4_lm diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 2ec896fd0e896..db33a4ab6c2e3 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -15,13 +15,12 @@ #include +#include #include #include #include -#include #include -#include #include #include #include @@ -402,6 +401,7 @@ struct ggml_backend_opencl_context { cl_program program_conv_2d_f32; cl_program program_conv_2d_f16_f32; cl_program program_tsembd; + cl_program program_gemv_moe_mxfp4_f32, program_gemm_moe_mxfp4_f32; cl_program program_mul_mv_id_q4_0_f32_8x_flat; cl_program program_mul_mv_id_q8_0_f32, program_mul_mv_id_q8_0_f32_flat; cl_program program_mul_mv_id_mxfp4_f32; @@ -452,7 +452,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_mul_mat_f16_f32_tiled; cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v; cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0; - cl_kernel kernel_convert_block_mxfp4, kernel_restore_block_mxfp4; + cl_kernel kernel_convert_block_mxfp4, kernel_convert_block_mxfp4_trans, kernel_restore_block_mxfp4, kernel_restore_block_mxfp4_trans; cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0; cl_kernel kernel_mul_mat_q4_0_f32_8x_flat; cl_kernel kernel_convert_block_q4_0_noshuffle; @@ -475,6 +475,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_conv_2d_f32; cl_kernel kernel_conv_2d_f16_f32; 
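The Metal conv_transpose_2d kernel added above maps one threadgroup to each output element (out_x, out_y, out_c) and one thread to each kernel tap (kw, kh); a tap contributes only when out - k is non-negative and divisible by the stride. A scalar C reference that computes the full per-element sum produced by the threadgroup reduction (illustrative only; the layouts follow the kernel's IC/IH/IW/KH/KW/OC arguments):

    #include <stddef.h>

    // Reference conv_transpose_2d accumulation for a single output element.
    // src0: kernel [IC][OC][KH][KW], flattened as in the Metal kernel's kernel_idx
    // src1: input  [IC][IH][IW]
    static float conv_t2d_ref_at(const float * src0, const float * src1,
                                 int IC, int IH, int IW, int KH, int KW, int OC,
                                 int s0, int out_x, int out_y, int out_c) {
        float v = 0.0f;
        for (int in_c = 0; in_c < IC; in_c++) {
            for (int kh = 0; kh < KH; kh++) {
                int in_y = out_y - kh;
                if (in_y < 0 || in_y % s0) continue;
                in_y /= s0;
                if (in_y >= IH) continue;
                for (int kw = 0; kw < KW; kw++) {
                    int in_x = out_x - kw;
                    if (in_x < 0 || in_x % s0) continue;
                    in_x /= s0;
                    if (in_x >= IW) continue;
                    const size_t input_idx  = (size_t) (IW * IH) * in_c + (size_t) IW * in_y + in_x;
                    const size_t kernel_idx = (size_t) (KH * KW * OC) * in_c
                                            + (size_t) (KH * KW) * out_c
                                            + (size_t) KW * kh + kw;
                    v += src0[kernel_idx] * src1[input_idx];
                }
            }
        }
        return v;
    }
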
cl_kernel kernel_timestep_embedding; + cl_kernel kernel_gemv_moe_mxfp4_f32, kernel_gemm_moe_mxfp4_f32; cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat; cl_kernel kernel_mul_mv_id_q8_0_f32, kernel_mul_mv_id_q8_0_f32_flat; cl_kernel kernel_mul_mv_id_mxfp4_f32; @@ -531,25 +532,17 @@ struct ggml_backend_opencl_context { } // Dump a csv - float total_kernel_time = 0; - fprintf(fperf, "op name, kernel name, queued duration (ms), submit duration(ms), exec duration (ms), complete duration (ms), total duration (ms), global size, local size, output size\n"); + fprintf(fperf, "op name, kernel name, exec duration (ms), global size, local size, output size\n"); for (const ProfilingInfo & info : profiling_info) { - total_kernel_time += info.cmd_duration_ns/1.e6f; - fprintf(fperf, "%s,%s,%f,%f,%f,%f,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", + fprintf(fperf, "%s,%s,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", info.op_name.c_str(), info.kernel_name.c_str(), - info.cmd_queued_duration_ns/1.e6f, - info.cmd_submit_duration_ns/1.e6f, info.cmd_duration_ns/1.e6f, - info.cmd_complete_duration_ns/1.e6f, - info.cmd_total_duration_ns/1.e6f, info.global_size[0], info.global_size[1], info.global_size[2], info.local_size[0], info.local_size[1], info.local_size[2], info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]); } fclose(fperf); - GGML_LOG_INFO("ggml_opencl: total kernel time: %f\n", total_kernel_time); - // Dump a simple chrome trace FILE* ftrace = fopen("cl_trace.json", "w"); if (!ftrace) { @@ -559,14 +552,14 @@ struct ggml_backend_opencl_context { fprintf(ftrace, "[\n"); for (const ProfilingInfo & info : profiling_info) { - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Host\"},\n", info.kernel_name.c_str(), info.cmd_queued/1000); - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Host\"},\n", info.kernel_name.c_str(), info.cmd_submit/1000); - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Device\"},\n", info.kernel_name.c_str(), info.cmd_start/1000); - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %" PRIu64 ", \"pid\": \"\", \"tid\": \"Device\"},\n", info.kernel_name.c_str(), info.cmd_end/1000); } fclose(ftrace); @@ -777,6 +770,8 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", &err), err)); CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err)); + CL_CHECK((backend_ctx->kernel_convert_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, 
"kernel_convert_block_mxfp4_trans", &err), err)); + CL_CHECK((backend_ctx->kernel_restore_block_mxfp4_trans = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4_trans", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err)); CL_CHECK((backend_ctx->kernel_convert_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q8_0", &err), err)); CL_CHECK((backend_ctx->kernel_restore_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q8_0", &err), err)); @@ -1991,6 +1986,42 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve CL_CHECK((backend_ctx->CL_mul_mat_Ab_Bi_8x4 = clCreateKernel(backend_ctx->program_CL_gemm, "kernel_mul_mat_Ab_Bi_8x4", &err), err)); GGML_LOG_CONT("."); } + + std::string CL_moe_compile_opts = std::string("-cl-std=") + opencl_c_std + + " -cl-mad-enable " + " -cl-fast-relaxed-math"; + + // gemv_moe_mxfp4_f32 + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "gemv_moe_mxfp4_f32.cl.h" + }; +#else + const std::string kernel_src = read_file("gemv_moe_mxfp4_f32.cl"); +#endif + backend_ctx->program_gemv_moe_mxfp4_f32 = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_moe_compile_opts); + + CL_CHECK((backend_ctx->kernel_gemv_moe_mxfp4_f32 = clCreateKernel(backend_ctx->program_gemv_moe_mxfp4_f32, "kernel_gemv_moe_mxfp4_f32", &err), err)); + GGML_LOG_CONT("."); + } + + // gemm_moe_mxfp4_f32 + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "gemm_moe_mxfp4_f32.cl.h" + }; +#else + const std::string kernel_src = read_file("gemm_moe_mxfp4_f32.cl"); +#endif + backend_ctx->program_gemm_moe_mxfp4_f32 = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), CL_moe_compile_opts); + + CL_CHECK((backend_ctx->kernel_gemm_moe_mxfp4_f32 = clCreateKernel(backend_ctx->program_gemm_moe_mxfp4_f32, "kernel_gemm_moe_mxfp4_f32", &err), err)); + GGML_LOG_CONT("."); + } #endif // GGML_OPENCL_USE_ADRENO_KERNELS GGML_LOG_CONT("\n"); } @@ -3299,6 +3330,12 @@ inline bool use_adreno_kernels(const ggml_backend_opencl_context *backend_ctx, c tensor->ne[2] == 1 && tensor->ne[3] == 1; } +inline bool use_adreno_moe_kernels(const ggml_backend_opencl_context *backend_ctx, const ggml_tensor *tensor) { + GGML_UNUSED(backend_ctx); + int ne01 = tensor->ne[1]; + return ((strstr(tensor->name, "ffn") != NULL) || (strstr(tensor->name, "as") != NULL)) && (ne01 % 64 == 0); +} + static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer->buft->device); @@ -3601,14 +3638,39 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); CL_CHECK(err); +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS + if (use_adreno_moe_kernels(backend_ctx, tensor)) { + cl_kernel kernel = backend_ctx->kernel_convert_block_mxfp4_trans; + + int ne00 = tensor->ne[0]; + int ne01 = tensor->ne[1]; + int ne02 = tensor->ne[2]; + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->e)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 4, 
sizeof(int), &ne01)); + + size_t global_work_size[3] = {static_cast(((ne01 + 63) / 64) * 64), static_cast(ne00 / 32), static_cast(ne02)}; + size_t local_work_size[3] = {64, 2, 1}; + + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + CL_CHECK(clWaitForEvents(1, &evt)); + CL_CHECK(clReleaseMemObject(data_device)); + tensor->extra = extra; + + return; + } +#endif cl_kernel kernel = backend_ctx->kernel_convert_block_mxfp4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->e)); - size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; - size_t local_work_size[] = {64, 1, 1}; + size_t global_work_size[3] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; + size_t local_work_size[3] = {64, 1, 1}; cl_event evt; CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); @@ -3624,7 +3686,6 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, { extra->q } }; extra->q_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_format_q, &img_desc_q, NULL, &err); - tensor->extra = extra; return; @@ -3751,6 +3812,33 @@ static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, ggml_nbytes(tensor), NULL, &err); CL_CHECK(err); +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS + if (use_adreno_moe_kernels(backend_ctx, tensor)) { + cl_kernel kernel = backend_ctx->kernel_restore_block_mxfp4_trans; + + int ne00 = tensor->ne[0]; + int ne01 = tensor->ne[1]; + int ne02 = tensor->ne[2]; + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->e)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_int), &ne01)); + + size_t global_work_size[3] = {static_cast(((ne01 + 63) / 64) * 64), static_cast(ne00 / 32), static_cast(ne02)}; + size_t local_work_size[3] = {64, 2, 1}; + + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, + global_work_size, local_work_size, 0, NULL, &evt)); + CL_CHECK(clWaitForEvents(1, &evt)); + CL_CHECK(clEnqueueReadBuffer( + queue, data_device, CL_TRUE, offset, + size, data, 0, NULL, NULL)); + CL_CHECK(clReleaseMemObject(data_device)); + return; + } +#endif cl_kernel kernel = backend_ctx->kernel_restore_block_mxfp4; CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->e)); @@ -7553,6 +7641,9 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, const int ne21 = src2->ne[1]; const cl_ulong nb21 = src2->nb[1]; + const cl_ulong nb20 = src2->nb[0]; + + UNUSED(nb20); const int ne0 = dst->ne[0]; const int ne1 = dst->ne[1]; @@ -7692,6 +7783,105 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, break; } case GGML_TYPE_MXFP4: { +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS + if (use_adreno_moe_kernels(backend_ctx, src0)) { + cl_int status; + + size_t local_size[3] = {64, 2, 1}; + size_t global_size[3] = {64, 2, 1}; + + cl_mem src1_sub_buffer, buf_src1_image, buf_src2; + + int tile_size = 320; + if (ne12 == 1) { // for gemv + kernel = backend_ctx->kernel_gemv_moe_mxfp4_f32; + + // create a sub_buffer for src2 + 
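The *_trans convert/restore kernels above re-tile MXFP4 blocks so that the i00-th block of every row is stored contiguously, which is the layout the MoE GEMV/GEMM kernels index. A small C sketch of the two block offsets (QK_MXFP4 = 32 as in ggml-common.h):

    #include <stdint.h>

    #define QK_MXFP4 32

    // Block offsets used by kernel_convert_block_mxfp4_trans:
    //   source: block i00 of row i01 in slice i02 (row-major blocks)
    //   dest  : transposed so that the i00-th blocks of all rows are adjacent
    static inline uint32_t src_blk_offset(uint32_t i00, uint32_t i01, uint32_t i02,
                                          uint32_t ne00, uint32_t ne01) {
        const uint32_t ne00_blk = ne00 / QK_MXFP4;
        return i00 + i01 * ne00_blk + i02 * ne00_blk * ne01;
    }

    static inline uint32_t dst_blk_offset(uint32_t i00, uint32_t i01, uint32_t i02,
                                          uint32_t ne00, uint32_t ne01) {
        const uint32_t ne00_blk = ne00 / QK_MXFP4;
        return i01 + i00 * ne01 + i02 * ne00_blk * ne01;
    }
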
cl_buffer_region region; + region.origin = offset2; + region.size = ne20 * ne21 * sizeof(int); + buf_src2 = clCreateSubBuffer(extra2->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &status); + CL_CHECK(status); + + // set thread grid + global_size[0] = static_cast(ne01); + global_size[1] = 4; + global_size[2] = static_cast(ne20); + local_size[1] = 4; + } else { // for gemm + kernel = backend_ctx->kernel_gemm_moe_mxfp4_f32; + + // preprocess router table + int num_tiles_per_expert = (ne01 + tile_size - 1) / tile_size; + void * host_src2_reorder = malloc(ne20 * ne21 * 4 * num_tiles_per_expert * sizeof(short)); + void * host_src2 = malloc(ne21 * nb21); + CL_CHECK(clEnqueueReadBuffer(backend_ctx->queue, extra2->data_device, CL_TRUE, offset2, ne21 * nb21, host_src2, 0, NULL, NULL)); + int total_experts = nb21 / nb20; + int out_idx = 0; + for (int i_expert = 0; i_expert < ne02; i_expert++) { + for (int i_tile = 0; i_tile < num_tiles_per_expert; i_tile++) { + for (int j = 0; j < ne21; j++) { + for (int i = 0; i < ne20; i++) { + int expert = ((int *)host_src2)[j * total_experts + i]; + if (i_expert == expert) { + ((short *)host_src2_reorder)[out_idx] = static_cast(expert); + ((short *)host_src2_reorder)[out_idx + 1] = static_cast(j * ne11 + (i % ne11)); + ((short *)host_src2_reorder)[out_idx + 2] = static_cast(j * ne20 + i); + ((short *)host_src2_reorder)[out_idx + 3] = static_cast(i_tile); + out_idx += 4; + } + } + } + } + } + buf_src2 = clCreateBuffer(backend_ctx->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, ne20 * ne21 * 4 * num_tiles_per_expert * sizeof(short), host_src2_reorder, &status); + CL_CHECK(status); + + // set thread grid + global_size[0] = static_cast(tile_size); + global_size[2] = static_cast(ne20 * ne21 * num_tiles_per_expert); + } + + // create a sub_buffer for src1 + cl_buffer_region region; + region.origin = offset1; + region.size = ne10 * ne11 * ne12 * sizeof(float); + src1_sub_buffer = clCreateSubBuffer(extra1->data_device, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &status); + CL_CHECK(status); + + // create image for src1 + cl_image_format image_format_buf_src1 = {CL_RGBA, CL_FLOAT}; + cl_image_desc image_desc_buf_src1 = {CL_MEM_OBJECT_IMAGE1D_BUFFER, static_cast(ne10 * ne11 * ne12 / 4), 0,0,0,0,0,0,0, {src1_sub_buffer}}; + buf_src1_image = clCreateImage(backend_ctx->context, CL_MEM_READ_ONLY, &image_format_buf_src1, &image_desc_buf_src1, NULL, &status); + CL_CHECK(status); + + // Set kernel args + int arg_idx = 0; + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extra0_mxfp4->q)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extra0_mxfp4->e)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &buf_src1_image)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &buf_src2)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne01)); + if (ne12 == 1) { + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &ne11)); + } else { + CL_CHECK(clSetKernelArg(kernel, arg_idx++, sizeof(int), &tile_size)); + } + + // launch kernel + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_size, local_size, dst); + + // deallocate sub buffers and images + CL_CHECK(clReleaseMemObject(src1_sub_buffer)); + CL_CHECK(clReleaseMemObject(buf_src1_image)); + CL_CHECK(clReleaseMemObject(buf_src2)); + return; 
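For orientation, the Adreno MoE path above ultimately computes a standard mul_mat_id: for every token and every selected expert slot, the activation row is multiplied by that expert's weight matrix. A simplified float reference with the MXFP4 dequantization abstracted away (illustrative only; shapes and names are ours, and a single activation row per token is assumed):

    #include <stddef.h>
    #include <stdint.h>

    // w   : [n_expert][ne01][ne00]  expert weight matrices (already dequantized)
    // x   : [n_token][ne00]         activations, one row per token
    // ids : [n_token][n_used]       selected expert per token and slot
    // dst : [n_token][n_used][ne01]
    static void mul_mat_id_ref(const float * w, const float * x, const int32_t * ids, float * dst,
                               int ne00, int ne01, int n_used, int n_token) {
        for (int j = 0; j < n_token; j++) {
            for (int s = 0; s < n_used; s++) {
                const int     expert = ids[j * n_used + s];
                const float * W      = w + (size_t) expert * ne01 * ne00;
                const float * xr     = x + (size_t) j * ne00;
                float *       out    = dst + ((size_t) j * n_used + s) * ne01;
                for (int r = 0; r < ne01; r++) {
                    float acc = 0.0f;
                    for (int k = 0; k < ne00; k++) {
                        acc += W[(size_t) r * ne00 + k] * xr[k];
                    }
                    out[r] = acc;
                }
            }
        }
    }
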
+ } // else fallback to generic kernel +#endif // GGML_OPENCL_USE_ADRENO_KERNELS + #ifdef GGML_OPENCL_SOA_Q kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat; diff --git a/ggml/src/ggml-opencl/kernels/cvt.cl b/ggml/src/ggml-opencl/kernels/cvt.cl index 045300eb3a537..b26f9c5fb2a31 100644 --- a/ggml/src/ggml-opencl/kernels/cvt.cl +++ b/ggml/src/ggml-opencl/kernels/cvt.cl @@ -147,6 +147,27 @@ kernel void kernel_convert_block_mxfp4( } } +kernel void kernel_convert_block_mxfp4_trans( + global struct block_mxfp4 * src0, + __global uint4 * dst_q, + __global uchar * dst_e, + uint ne00, + uint ne01 +) { + int i00 = get_global_id(1); + uint i01 = get_global_id(0); + uint i02 = get_global_id(2); + + uint ne00_blk = ne00 / QK_MXFP4; + uint src_blk_offset = i00 + i01 * ne00_blk + i02 * ne00_blk * ne01; + uint dst_blk_offset = i01 + i00 * ne01 + i02 * ne00_blk * ne01; + + global struct block_mxfp4 * b = src0 + src_blk_offset; + + dst_q[dst_blk_offset] = ((global uint4 *)(&(b->qs[0])))[0]; + dst_e[dst_blk_offset] = b->e; +} + kernel void kernel_restore_block_mxfp4( global uchar * src_q, global half * src_e, @@ -162,6 +183,27 @@ kernel void kernel_restore_block_mxfp4( } } +kernel void kernel_restore_block_mxfp4_trans( + __global uint4 * src_q, + __global uchar * src_e, + global struct block_mxfp4 * dst, + uint ne00, + uint ne01 +) { + int i00 = get_global_id(1); + uint i01 = get_global_id(0); + uint i02 = get_global_id(2); + + uint ne00_blk = ne00 / QK_MXFP4; + uint src_blk_offset = i01 + i00 * ne01 + i02 * ne00_blk * ne01; + uint dst_blk_offset = i00 + i01 * ne00_blk + i02 * ne00_blk * ne01; + + global struct block_mxfp4 * b = dst + dst_blk_offset; + + ((global uint4 *)(&(b->qs[0])))[0] = src_q[src_blk_offset]; + b->e = src_e[src_blk_offset]; +} + //------------------------------------------------------------------------------ // block_q8_0 //------------------------------------------------------------------------------ diff --git a/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl b/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl new file mode 100644 index 0000000000000..3917aa3fd9798 --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl @@ -0,0 +1,162 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable + +#define QK_MXFP4 32 +#define N_SIMDGROUP 2 +#define SIMDGROUP_WIDTH 64 + +static inline half8 mxfp4_to_fp16_packed8(ushort2 fp4x8) { //, ushort 0x0E00, ushort 0x8000) { + ushort2 fp16_packed_a_0, fp16_packed_b_0, bias_a, bias_b, sign_a, sign_b; + fp16_packed_a_0.lo = (fp4x8.s0 << 9) & 0x0E00; + fp16_packed_a_0.hi = (fp4x8.s0 << 5) & 0x0E00; + fp16_packed_b_0.lo = (fp4x8.s0 << 1) & 0x0E00; + fp16_packed_b_0.hi = (fp4x8.s0 >> 3) & 0x0E00; + + bias_a.lo = (fp16_packed_a_0.lo != 0) ? 0x3800 : 0x0; + bias_a.hi = (fp16_packed_a_0.hi != 0) ? 0x3800 : 0x0; + bias_b.lo = (fp16_packed_b_0.lo != 0) ? 0x3800 : 0x0; + bias_b.hi = (fp16_packed_b_0.hi != 0) ? 0x3800 : 0x0; + + fp16_packed_a_0.lo = (fp16_packed_a_0.lo != 0x0200) ? fp16_packed_a_0.lo : 0x0; + fp16_packed_a_0.hi = (fp16_packed_a_0.hi != 0x0200) ? fp16_packed_a_0.hi : 0x0; + fp16_packed_b_0.lo = (fp16_packed_b_0.lo != 0x0200) ? fp16_packed_b_0.lo : 0x0; + fp16_packed_b_0.hi = (fp16_packed_b_0.hi != 0x0200) ? 
fp16_packed_b_0.hi : 0x0; + + sign_a.lo = (fp4x8.s0 << 12) & 0x8000; + sign_a.hi = (fp4x8.s0 << 8) & 0x8000; + sign_b.lo = (fp4x8.s0 << 4) & 0x8000; + sign_b.hi = fp4x8.s0 & 0x8000; + + fp16_packed_a_0 = sign_a + bias_a + fp16_packed_a_0; + fp16_packed_b_0 = sign_b + bias_b + fp16_packed_b_0; + + ushort2 fp16_packed_a_1, fp16_packed_b_1; + fp16_packed_a_1.lo = (fp4x8.s1 << 9) & 0x0E00; + fp16_packed_a_1.hi = (fp4x8.s1 << 5) & 0x0E00; + fp16_packed_b_1.lo = (fp4x8.s1 << 1) & 0x0E00; + fp16_packed_b_1.hi = (fp4x8.s1 >> 3) & 0x0E00; + + bias_a.lo = (fp16_packed_a_1.lo != 0) ? 0x3800 : 0x0; + bias_a.hi = (fp16_packed_a_1.hi != 0) ? 0x3800 : 0x0; + bias_b.lo = (fp16_packed_b_1.lo != 0) ? 0x3800 : 0x0; + bias_b.hi = (fp16_packed_b_1.hi != 0) ? 0x3800 : 0x0; + + fp16_packed_a_1.lo = (fp16_packed_a_1.lo != 0x0200) ? fp16_packed_a_1.lo : 0x0; + fp16_packed_a_1.hi = (fp16_packed_a_1.hi != 0x0200) ? fp16_packed_a_1.hi : 0x0; + fp16_packed_b_1.lo = (fp16_packed_b_1.lo != 0x0200) ? fp16_packed_b_1.lo : 0x0; + fp16_packed_b_1.hi = (fp16_packed_b_1.hi != 0x0200) ? fp16_packed_b_1.hi : 0x0; + + sign_a.lo = (fp4x8.s1 << 12) & 0x8000; + sign_a.hi = (fp4x8.s1 << 8) & 0x8000; + sign_b.lo = (fp4x8.s1 << 4) & 0x8000; + sign_b.hi = fp4x8.s1 & 0x8000; + + fp16_packed_a_1 = sign_a + bias_a + fp16_packed_a_1; + fp16_packed_b_1 = sign_b + bias_b + fp16_packed_b_1; + + return as_half8((ushort8)(fp16_packed_a_0, fp16_packed_b_0, fp16_packed_a_1, fp16_packed_b_1)); +} + +static inline float e8m0_to_fp32(uchar x) { + int bits; + bits = (x == 0) ? 0x00400000 : ((uint) x << 23); + return as_float(bits); +} + + +__attribute__((qcom_reqd_sub_group_size("half"))) +__kernel void kernel_gemm_moe_mxfp4_f32( + __global uint4 * src0_q, + __global uchar * src0_e, + __read_only image1d_buffer_t src1, + __global ushort4 * src2, + __global float * dst, + ulong offsetd, + int ne00, + int ne01, + int tile_size +) { + uint i01 = get_global_id(0); + uint i20 = get_global_id(2); + uint sgid = get_local_id(1); + uint slid = get_sub_group_local_id(); + + ushort4 router = src2[i20]; + ushort expert_id = router.x; + ushort i11 = router.y; + ushort i1 = router.z; + ushort tile_id = router.w; + + if (tile_id * tile_size + i01 >= ne01) { // handle edge case when ne01 is not multiple of tile_size + return; + } + + uint expert_offset = expert_id * ne00 * ne01 / 32; + uint tile_offset = expert_offset + tile_id * tile_size + i01; + + __private float sum = 0.0f; // each thread calculate partial sum of one output + + // loop along ne00 in block granularity, skip 4 blocks every iter + for (uint ib00 = sgid; ib00 < (ne00 / QK_MXFP4); ib00 += N_SIMDGROUP) { + // load one block of q + uint4 regQ = src0_q[tile_offset + ib00 * ne01]; + // convert 8 fp4 to fp16 + half8 fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s0)); + + uint offset = i11 * ne00 / 4 + ib00 * 8; + float4 shared_y4; + shared_y4 = read_imagef(src1, (offset + 0)); + float4 acc = shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 4)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s1)); + + shared_y4 = read_imagef(src1, (offset + 1)); + acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 5)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s2)); + + shared_y4 = read_imagef(src1, (offset + 2)); + acc += shared_y4 
* (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 6)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s3)); + + shared_y4 = read_imagef(src1, (offset + 3)); + acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 7)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + uchar regE = src0_e[tile_offset + ib00 * ne01]; + sum += e8m0_to_fp32(regE) * ((acc.s0 + acc.s1) + (acc.s2 + acc.s3)); + } + + // reduction in local memory, assumes #subgroups=4 + __local float reduceLM[SIMDGROUP_WIDTH * (N_SIMDGROUP - 1)]; + if (sgid == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = sum; + // if (sgid == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = sum; + // if (sgid == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = sum; + barrier(CLK_LOCAL_MEM_FENCE); + if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; + // if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; + // if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; + + // 1 outputs per thread in subgroup 0 + if (sgid == 0) { + dst = dst + (offsetd >> 2); + dst[i01 + tile_id * tile_size + i1 * ne01] = sum; + } + +} diff --git a/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl b/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl new file mode 100644 index 0000000000000..b4b1e511f945e --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl @@ -0,0 +1,156 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable + +#define QK_MXFP4 32 +#define N_SIMDGROUP 4 +#define SIMDGROUP_WIDTH 64 + +static inline half8 mxfp4_to_fp16_packed8(ushort2 fp4x8) { //, ushort 0x0E00, ushort 0x8000) { + ushort2 fp16_packed_a_0, fp16_packed_b_0, bias_a, bias_b, sign_a, sign_b; + fp16_packed_a_0.lo = (fp4x8.s0 << 9) & 0x0E00; + fp16_packed_a_0.hi = (fp4x8.s0 << 5) & 0x0E00; + fp16_packed_b_0.lo = (fp4x8.s0 << 1) & 0x0E00; + fp16_packed_b_0.hi = (fp4x8.s0 >> 3) & 0x0E00; + + bias_a.lo = (fp16_packed_a_0.lo != 0) ? 0x3800 : 0x0; + bias_a.hi = (fp16_packed_a_0.hi != 0) ? 0x3800 : 0x0; + bias_b.lo = (fp16_packed_b_0.lo != 0) ? 0x3800 : 0x0; + bias_b.hi = (fp16_packed_b_0.hi != 0) ? 0x3800 : 0x0; + + fp16_packed_a_0.lo = (fp16_packed_a_0.lo != 0x0200) ? fp16_packed_a_0.lo : 0x0; + fp16_packed_a_0.hi = (fp16_packed_a_0.hi != 0x0200) ? fp16_packed_a_0.hi : 0x0; + fp16_packed_b_0.lo = (fp16_packed_b_0.lo != 0x0200) ? fp16_packed_b_0.lo : 0x0; + fp16_packed_b_0.hi = (fp16_packed_b_0.hi != 0x0200) ? fp16_packed_b_0.hi : 0x0; + + sign_a.lo = (fp4x8.s0 << 12) & 0x8000; + sign_a.hi = (fp4x8.s0 << 8) & 0x8000; + sign_b.lo = (fp4x8.s0 << 4) & 0x8000; + sign_b.hi = fp4x8.s0 & 0x8000; + + fp16_packed_a_0 = sign_a + bias_a + fp16_packed_a_0; + fp16_packed_b_0 = sign_b + bias_b + fp16_packed_b_0; + + ushort2 fp16_packed_a_1, fp16_packed_b_1; + fp16_packed_a_1.lo = (fp4x8.s1 << 9) & 0x0E00; + fp16_packed_a_1.hi = (fp4x8.s1 << 5) & 0x0E00; + fp16_packed_b_1.lo = (fp4x8.s1 << 1) & 0x0E00; + fp16_packed_b_1.hi = (fp4x8.s1 >> 3) & 0x0E00; + + bias_a.lo = (fp16_packed_a_1.lo != 0) ? 0x3800 : 0x0; + bias_a.hi = (fp16_packed_a_1.hi != 0) ? 0x3800 : 0x0; + bias_b.lo = (fp16_packed_b_1.lo != 0) ? 0x3800 : 0x0; + bias_b.hi = (fp16_packed_b_1.hi != 0) ? 0x3800 : 0x0; + + fp16_packed_a_1.lo = (fp16_packed_a_1.lo != 0x0200) ? 
fp16_packed_a_1.lo : 0x0; + fp16_packed_a_1.hi = (fp16_packed_a_1.hi != 0x0200) ? fp16_packed_a_1.hi : 0x0; + fp16_packed_b_1.lo = (fp16_packed_b_1.lo != 0x0200) ? fp16_packed_b_1.lo : 0x0; + fp16_packed_b_1.hi = (fp16_packed_b_1.hi != 0x0200) ? fp16_packed_b_1.hi : 0x0; + + sign_a.lo = (fp4x8.s1 << 12) & 0x8000; + sign_a.hi = (fp4x8.s1 << 8) & 0x8000; + sign_b.lo = (fp4x8.s1 << 4) & 0x8000; + sign_b.hi = fp4x8.s1 & 0x8000; + + fp16_packed_a_1 = sign_a + bias_a + fp16_packed_a_1; + fp16_packed_b_1 = sign_b + bias_b + fp16_packed_b_1; + + return as_half8((ushort8)(fp16_packed_a_0, fp16_packed_b_0, fp16_packed_a_1, fp16_packed_b_1)); +} + +static inline float e8m0_to_fp32(uchar x) { + int bits; + bits = (x == 0) ? 0x00400000 : ((uint) x << 23); + return as_float(bits); +} + + +__attribute__((qcom_reqd_sub_group_size("half"))) +__kernel void kernel_gemv_moe_mxfp4_f32( + __global uint4 * src0_q, + __global uchar * src0_e, + __read_only image1d_buffer_t src1, + __global uint * src2, + __global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne11 +) { + uint i01 = get_global_id(0); + uint i20 = get_global_id(2); + uint sgid = get_local_id(1); + uint slid = get_sub_group_local_id(); + + uint i11 = i20 % ne11; + + uint expert_id = src2[i20]; + uint expert_offset = expert_id * ne00 * ne01 / 32; + + __private float sum = 0.0f; // each thread calculate partial sum of one output + + // loop along ne00 in block granularity, skip 4 blocks every iter + for (uint ib00 = sgid; ib00 < (ne00 / QK_MXFP4); ib00 += N_SIMDGROUP) { + + // load one block of q + uint4 regQ = src0_q[expert_offset + ib00 * ne01 + i01]; + + uint offset = i11 * ne00 / 4 + ib00 * 8; + + half8 fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s0)); + + float4 shared_y4; + shared_y4 = read_imagef(src1, (offset + 0)); + float4 acc = shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 4)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s1)); + + shared_y4 = read_imagef(src1, (offset + 1)); + acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 5)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s2)); + + shared_y4 = read_imagef(src1, (offset + 2)); + acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 6)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + + fp16x8 = mxfp4_to_fp16_packed8(as_ushort2(regQ.s3)); + + shared_y4 = read_imagef(src1, (offset + 3)); + acc += shared_y4 * (float4)(fp16x8.s0, fp16x8.s2, fp16x8.s4, fp16x8.s6); + + shared_y4 = read_imagef(src1, (offset + 7)); + acc += shared_y4 * (float4)(fp16x8.s1, fp16x8.s3, fp16x8.s5, fp16x8.s7); + + uchar regE = src0_e[ib00 * ne01 + i01 + expert_offset]; + sum += e8m0_to_fp32(regE) * ((acc.s0 + acc.s1) + (acc.s2 + acc.s3)); + } + + // reduction in local memory, assumes #subgroups=4 + __local float reduceLM[SIMDGROUP_WIDTH * (N_SIMDGROUP - 1)]; + if (sgid == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = sum; + if (sgid == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = sum; + if (sgid == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = sum; + barrier(CLK_LOCAL_MEM_FENCE); + if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; + if (sgid == 0) sum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; + if (sgid == 0) sum += 
reduceLM[SIMDGROUP_WIDTH * 2 + slid]; + + // 1 outputs per thread in subgroup 0 + if (sgid == 0) { + dst = dst + (offsetd >> 2); + dst[i01 + i20 * ne01] = sum; + } + +} diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index aad48d62a850c..a38df5a97e1f0 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -939,6 +939,7 @@ class rpc_server { bool graph_compute(const std::vector & input, rpc_msg_graph_compute_rsp & response); bool init_tensor(const rpc_msg_init_tensor_req & request); bool get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response); + bool get_device_memory(const rpc_msg_get_device_memory_req & request, rpc_msg_get_device_memory_rsp & response); private: bool get_cached_file(uint64_t hash, std::vector & data); @@ -1458,6 +1459,20 @@ bool rpc_server::graph_compute(const std::vector & input, rpc_msg_graph return true; } +bool rpc_server::get_device_memory(const rpc_msg_get_device_memory_req & request, rpc_msg_get_device_memory_rsp & response) { + uint32_t dev_id = request.device; + if (dev_id >= backends.size()) { + return false; + } + size_t free, total; + ggml_backend_dev_t dev = ggml_backend_get_device(backends[dev_id]); + ggml_backend_dev_memory(dev, &free, &total); + response.free_mem = free; + response.total_mem = total; + LOG_DBG("[%s] device: %u, free_mem: %" PRIu64 ", total_mem: %" PRIu64 "\n", __func__, dev_id, response.free_mem, response.total_mem); + return true; +} + rpc_server::~rpc_server() { for (auto buffer : buffers) { ggml_backend_buffer_free(buffer); @@ -1465,7 +1480,7 @@ rpc_server::~rpc_server() { } static void rpc_serve_client(const std::vector & backends, const char * cache_dir, - sockfd_t sockfd, const std::vector & free_mem, const std::vector & total_mem) { + sockfd_t sockfd) { rpc_server server(backends, cache_dir); uint8_t cmd; if (!recv_data(sockfd, &cmd, 1)) { @@ -1689,15 +1704,10 @@ static void rpc_serve_client(const std::vector & backends, const if (!recv_msg(sockfd, &request, sizeof(request))) { return; } - auto dev_id = request.device; - if (dev_id >= backends.size()) { + rpc_msg_get_device_memory_rsp response; + if (!server.get_device_memory(request, response)) { return; } - rpc_msg_get_device_memory_rsp response; - response.free_mem = free_mem[dev_id]; - response.total_mem = total_mem[dev_id]; - LOG_DBG("[get_device_mem] device: %u, free_mem: %" PRIu64 ", total_mem: %" PRIu64 "\n", dev_id, - response.free_mem, response.total_mem); if (!send_msg(sockfd, &response, sizeof(response))) { return; } @@ -1712,15 +1722,12 @@ static void rpc_serve_client(const std::vector & backends, const } void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir, - size_t n_threads, size_t n_devices, - ggml_backend_dev_t * devices, size_t * free_mem, size_t * total_mem) { - if (n_devices == 0 || devices == nullptr || free_mem == nullptr || total_mem == nullptr) { + size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices) { + if (n_devices == 0 || devices == nullptr) { fprintf(stderr, "Invalid arguments to ggml_backend_rpc_start_server\n"); return; } std::vector backends; - std::vector free_mem_vec(free_mem, free_mem + n_devices); - std::vector total_mem_vec(total_mem, total_mem + n_devices); printf("Starting RPC server v%d.%d.%d\n", RPC_PROTO_MAJOR_VERSION, RPC_PROTO_MINOR_VERSION, @@ -1730,8 +1737,10 @@ void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir printf("Devices:\n"); for (size_t i = 0; i < n_devices; 
i++) { auto dev = devices[i]; + size_t free, total; + ggml_backend_dev_memory(dev, &free, &total); printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), - total_mem[i] / 1024 / 1024, free_mem[i] / 1024 / 1024); + total / 1024 / 1024, free / 1024 / 1024); auto backend = ggml_backend_dev_init(dev, nullptr); if (!backend) { fprintf(stderr, "Failed to create backend for device %s\n", dev->iface.get_name(dev)); @@ -1775,7 +1784,7 @@ void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir } printf("Accepted client connection\n"); fflush(stdout); - rpc_serve_client(backends, cache_dir, client_socket->fd, free_mem_vec, total_mem_vec); + rpc_serve_client(backends, cache_dir, client_socket->fd); printf("Client connection closed\n"); fflush(stdout); } diff --git a/ggml/src/ggml-sycl/backend.hpp b/ggml/src/ggml-sycl/backend.hpp index 6ff3215d5a439..b1575b8145138 100644 --- a/ggml/src/ggml-sycl/backend.hpp +++ b/ggml/src/ggml-sycl/backend.hpp @@ -37,5 +37,7 @@ #include "softmax.hpp" #include "tsembd.hpp" #include "wkv.hpp" +#include "pad_reflect_1d.hpp" + #endif // GGML_SYCL_BACKEND_HPP diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index aeeb387595017..810995d0cbf74 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -150,6 +150,26 @@ static __dpct_inline__ T op_clamp(T x, float min_val, float max_val) { return x < static_cast(min_val) ? static_cast(min_val) : (x > static_cast(max_val) ? static_cast(max_val) : x); } +template +static __dpct_inline__ T op_floor(T x) { + return sycl::floor(x); +} + +template +static __dpct_inline__ T op_ceil(T x) { + return sycl::ceil(x); +} + +template +static __dpct_inline__ T op_round(T x) { + return sycl::round(x); +} + +template +static __dpct_inline__ T op_trunc(T x) { + return sycl::trunc(x); +} + template static void unary_op_sgn_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { @@ -304,6 +324,34 @@ static void unary_op_clamp_kernel(const T * x, T * dst, const int k, const sycl: } } +template +static void unary_op_floor_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_floor(x[i]); + } +} + +template +static void unary_op_ceil_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_ceil(x[i]); + } +} + +template +static void unary_op_round_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_round(x[i]); + } +} + +template +static void unary_op_trunc_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_trunc(x[i]); + } +} + template static void upscale(const T *x, T *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, @@ -397,6 +445,14 @@ static void acc_f32_sycl(const float *x, const float *y, float *dst, }); } +template +static void arange_kernel(T * dst, const int k, T start, T step, + const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = start + static_cast(i) * step; + } +} + template static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, @@ -565,6 +621,25 @@ static inline void 
dispatch_ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx } +static inline void ggml_sycl_op_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(dst->type == GGML_TYPE_F32); + float start, stop, step; + memcpy(&start, dst->op_params, sizeof(float)); + memcpy(&stop, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&step, (float *) dst->op_params + 2, sizeof(float)); + dpct::queue_ptr stream = ctx.stream(); + SYCL_CHECK(ggml_sycl_set_device(ctx.device)); + float * dst_ptr = (float *)dst->data; + const int k = (int)ggml_nelements(dst); + const int num_blocks = ceil_div(k, SYCL_ARANGE_BLOCK_SIZE); + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE), + sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + arange_kernel(dst_ptr, k, start, step, item_ct1); + }); +} + } // namespace ggml_sycl_detail @@ -870,6 +945,58 @@ static inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tens }, min_val, max_val); } +static inline void ggml_sycl_op_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_floor_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_ceil_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_round_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_trunc_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} + static inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32); @@ -1090,3 +1217,28 @@ void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_geglu_quick(ctx, dst); } + +void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + 
scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/0); + ggml_sycl_detail::ggml_sycl_op_arange(ctx, dst); +} + +void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_floor(ctx, dst); +} + +void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_ceil(ctx, dst); +} + +void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_round(ctx, dst); +} + +void ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_trunc(ctx, dst); +} diff --git a/ggml/src/ggml-sycl/element_wise.hpp b/ggml/src/ggml-sycl/element_wise.hpp index 434743172876c..fcf93295cb215 100644 --- a/ggml/src/ggml-sycl/element_wise.hpp +++ b/ggml/src/ggml-sycl/element_wise.hpp @@ -80,5 +80,11 @@ void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + +void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_ELEMENTWISE_HPP diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 45b8c216c94d2..b695ba051b025 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -30,6 +30,9 @@ #include #include +#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC +# include +#endif #include #include "ggml-sycl.h" @@ -42,6 +45,7 @@ #include "ggml-sycl/presets.hpp" #include "ggml-sycl/gemm.hpp" #include "ggml-sycl/set_rows.hpp" +#include "ggml-sycl/set.hpp" #include "ggml-sycl/sycl_hw.hpp" #include "ggml-sycl/getrows.hpp" #include "ggml-sycl/quantize.hpp" @@ -53,6 +57,7 @@ int g_ggml_sycl_disable_optimize = 0; int g_ggml_sycl_disable_graph = 0; int g_ggml_sycl_disable_dnn = 0; int g_ggml_sycl_prioritize_dmmv = 0; +int g_ggml_sycl_use_async_mem_op = 0; static ggml_sycl_device_info ggml_sycl_init() { ggml_sycl_device_info info = {}; @@ -236,7 +241,20 @@ static void ggml_check_sycl() try { fprintf(stderr, "%s: SYCL_USE_XMX: no\n", __func__); #endif */ - + // Currently, we only use async malloc / free when graphs are enabled as it is required for the calls to be + // properly recorded. As this SYCL extension matures it may be beneficial to enable as the default path and in + // other places. 
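+    // Concretely (the block below): g_ggml_sycl_use_async_mem_op starts out as
+    // !g_ggml_sycl_disable_graph and is cleared again unless every SYCL device reports
+    // sycl::aspect::ext_oneapi_async_memory_alloc, so the async path is only taken when all
+    // devices can have their allocations recorded into a graph.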
+#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC + g_ggml_sycl_use_async_mem_op = !g_ggml_sycl_disable_graph; + if (g_ggml_sycl_use_async_mem_op) { + for (unsigned int i = 0; i < dpct::dev_mgr::instance().device_count(); ++i) { + if (!dpct::dev_mgr::instance().get_device(i).has(sycl::aspect::ext_oneapi_async_memory_alloc)) { + g_ggml_sycl_use_async_mem_op = 0; + break; + } + } + } +#endif if (CHECK_TRY_ERROR(g_all_sycl_device_count = dpct::dev_mgr::instance().device_count()) != 0) { initialized = true; @@ -2151,6 +2169,30 @@ inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream); } +inline void ggml_sycl_op_mean(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + dpct::queue_ptr main_stream = ctx.stream(); + SYCL_CHECK(ggml_sycl_set_device(ctx.device)); + + const float * src0_dd = static_cast(dst->src[0]->data); + float * dst_dd = static_cast(dst->data); + + const int64_t ncols = dst->src[0]->ne[0]; + const int64_t nrows = ggml_nrows(dst->src[0]); + + sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream); + + main_stream->parallel_for( + sycl::range<1>(nrows), + [=](sycl::id<1> row) { + dst_dd[row] /= ncols; + } + ); +} + + inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_I32); @@ -3006,19 +3048,51 @@ static bool ggml_sycl_supports_dmmv(enum ggml_type type) { } } +// Helper functions to unify device memory allocation for both async and sync paths +static inline void * sycl_ext_malloc_device(dpct::queue_ptr stream, size_t size) { + bool use_async = g_ggml_sycl_use_async_mem_op; +#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC + if (use_async) { + return syclex::async_malloc(*stream, sycl::usm::alloc::device, size); + } +#else + // If async allocation extension is not available, use_async should always be false. + GGML_ASSERT(!use_async); +#endif + return sycl::malloc(size, *stream, sycl::usm::alloc::device); +} + +static inline void sycl_ext_free(dpct::queue_ptr stream, void * ptr) { + bool use_async = g_ggml_sycl_use_async_mem_op; +#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC + if (use_async) { + syclex::async_free(*stream, ptr); + return; + } +#else + // If async allocation extension is not available, use_async should always be false. 
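+    // The non-async path falls through to the plain sycl::free below, which runs against the
+    // stream's context, so callers must wait on their outstanding events first. Illustration only
+    // (mirrors the reorder_qw_* callers further down):
+    //   void * tmp = sycl_ext_malloc_device(stream, size);
+    //   sycl::event e = stream->memcpy(tmp, data, size);
+    //   if (!g_ggml_sycl_use_async_mem_op) { e.wait(); }
+    //   sycl_ext_free(stream, tmp);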
+ GGML_ASSERT(!use_async); +#endif + sycl::free(ptr, *stream); +} + static void reorder_qw_q4_0(uint8_t * data_device, const int ncols, const int nrows, size_t size, size_t offset, dpct::queue_ptr stream) { - auto * tmp_buf = sycl::malloc_shared(size, *stream); - SYCL_CHECK( - CHECK_TRY_ERROR((*stream).memcpy(tmp_buf, data_device, size) - .wait())); + uint8_t * tmp_buf = static_cast(sycl_ext_malloc_device(stream, size)); + + sycl::event copy_event; + SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size))); + if (!g_ggml_sycl_use_async_mem_op) { + copy_event.wait(); + } + GGML_ASSERT((size % sizeof(block_q4_0) == 0)); GGML_ASSERT((offset % sizeof(block_q4_0) == 0)); int offset_blks = offset / sizeof(block_q4_0); auto qs_ptr = data_device + offset_blks * QK4_0 / 2; auto d_ptr = (sycl::half*)(qs_ptr + ncols * nrows / 2) + offset_blks; - stream->parallel_for( + auto reorder_event = stream->parallel_for( size / sizeof(block_q4_0), [=](auto i) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { const block_q4_0* x = (const block_q4_0*)tmp_buf; @@ -3029,9 +3103,11 @@ static void reorder_qw_q4_0(uint8_t * data_device, const int ncols, const int nr *(qs_ptr + ib * QK4_0 / 2 + j) = x[ib].qs[j]; } *(d_ptr + ib) = x[ib].d; - }).wait_and_throw(); - - sycl::free(tmp_buf, *stream); + }); + if (!g_ggml_sycl_use_async_mem_op) { + reorder_event.wait_and_throw(); + } + sycl_ext_free(stream, tmp_buf); } static void reorder_qw_q4_k(uint8_t * data_device, size_t size, size_t offset, dpct::queue_ptr stream) { @@ -3040,14 +3116,19 @@ static void reorder_qw_q4_k(uint8_t * data_device, size_t size, size_t offset, d const int nblocks = size / sizeof(block_q4_K); - auto * tmp_buf = sycl::malloc_shared(size, *stream); - SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy(tmp_buf, data_device, size).wait())); + uint8_t * tmp_buf = static_cast(sycl_ext_malloc_device(stream, size)); + + sycl::event copy_event; + SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size))); + if (!g_ggml_sycl_use_async_mem_op) { + copy_event.wait(); + } auto * qs_ptr = data_device; auto * scales_ptr = qs_ptr + QK_K / 2 * nblocks; auto * dm_ptr = (sycl::half2 *) (scales_ptr + K_SCALE_SIZE * nblocks); - stream->parallel_for(nblocks, [=](auto i) { + auto reorder_event = stream->parallel_for(nblocks, [=](auto i) { const block_q4_K * x = (const block_q4_K *) tmp_buf; const int ib = i; @@ -3060,9 +3141,11 @@ static void reorder_qw_q4_k(uint8_t * data_device, size_t size, size_t offset, d } dm_ptr[ib] = x[ib].dm; - }).wait_and_throw(); - - sycl::free(tmp_buf, *stream); + }); + if (!g_ggml_sycl_use_async_mem_op) { + reorder_event.wait_and_throw(); + } + sycl_ext_free(stream, tmp_buf); } static void reorder_qw_q6_k(uint8_t * data_device, size_t size, size_t offset, dpct::queue_ptr stream) { @@ -3071,42 +3154,46 @@ static void reorder_qw_q6_k(uint8_t * data_device, size_t size, size_t offset, d const int nblocks = size / sizeof(block_q6_K); - auto * tmp_buf = sycl::malloc_shared(size, *stream); - SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy(tmp_buf, data_device, size).wait())); + uint8_t * tmp_buf = static_cast(sycl_ext_malloc_device(stream, size)); + + sycl::event copy_event; + SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size))); + if (!g_ggml_sycl_use_async_mem_op) { + copy_event.wait(); + } auto * ql_ptr = data_device; auto * qh_ptr = ql_ptr + (QK_K / 2) * nblocks; auto * scales_ptr = qh_ptr + (QK_K / 4) * nblocks; sycl::half * dm_ptr = (sycl::half *) (scales_ptr + (QK_K / 16) * 
nblocks); - stream - ->parallel_for(nblocks, - [=](auto i) { - const block_q6_K * x = (const block_q6_K *) tmp_buf; - const int ib = i; - - const uint8_t * ql = x[ib].ql; - const uint8_t * qh = x[ib].qh; - uint8_t * base_ql_ptr = ql_ptr + (QK_K / 2) * ib; - uint8_t * base_qh_ptr = qh_ptr + (QK_K / 4) * ib; - uint8_t * base_scales_ptr = scales_ptr + (QK_K / 16) * ib; + auto reorder_event = stream->parallel_for(nblocks, [=](auto i) { + const block_q6_K * x = (const block_q6_K *) tmp_buf; + const int ib = i; - for (int j = 0; j < QK_K / 2; ++j) { - base_ql_ptr[j] = ql[j]; - } - for (int j = 0; j < QK_K / 4; ++j) { - base_qh_ptr[j] = qh[j]; - } + const uint8_t * ql = x[ib].ql; + const uint8_t * qh = x[ib].qh; + uint8_t * base_ql_ptr = ql_ptr + (QK_K / 2) * ib; + uint8_t * base_qh_ptr = qh_ptr + (QK_K / 4) * ib; + uint8_t * base_scales_ptr = scales_ptr + (QK_K / 16) * ib; - for (int j = 0; j < QK_K / 16; ++j) { - base_scales_ptr[j] = x[ib].scales[j]; - } + for (int j = 0; j < QK_K / 2; ++j) { + base_ql_ptr[j] = ql[j]; + } + for (int j = 0; j < QK_K / 4; ++j) { + base_qh_ptr[j] = qh[j]; + } - dm_ptr[ib] = x[ib].d; - }) - .wait_and_throw(); + for (int j = 0; j < QK_K / 16; ++j) { + base_scales_ptr[j] = x[ib].scales[j]; + } - sycl::free(tmp_buf, *stream); + dm_ptr[ib] = x[ib].d; + }); + if (!g_ggml_sycl_use_async_mem_op) { + reorder_event.wait_and_throw(); + } + sycl_ext_free(stream, tmp_buf); } static void reorder_qw(const ggml_tensor * src0, dpct::queue_ptr stream) { @@ -3535,6 +3622,12 @@ static void ggml_sycl_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * ds ggml_sycl_op_sum_rows(ctx, dst); } +static void ggml_sycl_mean(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); + ggml_sycl_op_mean(ctx, dst); +} + static void ggml_sycl_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); GGML_ASSERT(ggml_is_contiguous(dst->src[0])); @@ -3589,6 +3682,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_OP_GET_ROWS: ggml_sycl_get_rows(ctx, dst); break; + case GGML_OP_SET: + ggml_sycl_op_set(ctx, dst); + break; case GGML_OP_SET_ROWS: ggml_sycl_op_set_rows(ctx, dst); break; @@ -3664,6 +3760,18 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_UNARY_OP_ELU: ggml_sycl_elu(ctx, dst); break; + case GGML_UNARY_OP_FLOOR: + ggml_sycl_floor(ctx, dst); + break; + case GGML_UNARY_OP_CEIL: + ggml_sycl_ceil(ctx, dst); + break; + case GGML_UNARY_OP_ROUND: + ggml_sycl_round(ctx, dst); + break; + case GGML_UNARY_OP_TRUNC: + ggml_sycl_trunc(ctx, dst); + break; default: return false; } @@ -3698,6 +3806,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_OP_CONCAT: ggml_sycl_op_concat(ctx, dst); break; + case GGML_OP_PAD_REFLECT_1D: + ggml_sycl_op_pad_reflect_1d(ctx,dst); + break; case GGML_OP_UPSCALE: ggml_sycl_upscale(ctx, dst); break; @@ -3784,6 +3895,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_OP_SUM_ROWS: ggml_sycl_sum_rows(ctx, dst); break; + case GGML_OP_MEAN: + ggml_sycl_mean(ctx, dst); + break; case GGML_OP_ARGSORT: ggml_sycl_argsort(ctx, dst); break; @@ -3799,6 +3913,9 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg case GGML_OP_GATED_LINEAR_ATTN: ggml_sycl_op_gated_linear_attn(ctx, dst); break; 
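+        // GGML_OP_ARANGE is a generator op with no inputs; ggml_sycl_arange() reads
+        // {start, stop, step} from op_params and fills dst with start + i * step.
+        // ggml_backend_sycl_device_supports_op() below only advertises it for F32 output.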
+ case GGML_OP_ARANGE: + ggml_sycl_arange(ctx, dst); + break; default: return false; } @@ -4001,6 +4118,18 @@ static bool check_graph_compatibility(ggml_cgraph * cgraph) { GGML_LOG_INFO("%s: disabling SYCL graphs due to unsupported node type %s\n", __func__, ggml_op_name(node_op)); return false; + case GGML_OP_MUL_MAT: + // We cannot use graphs with ggml_sycl_mul_mat() when SYCL async memory allocation extensions are not available, + // as SYCL malloc / free and host wait calls are not supported when recording to a graph which are all present + // in reordering. + if (!g_ggml_sycl_use_async_mem_op) { + GGML_LOG_INFO( + "%s: disabling SYCL graphs due to unsupported node type when using a compiler without the " + "oneAPI async memory allocation extension " + "%s\n", + __func__, ggml_op_name(node_op)); + return false; + } } } return true; @@ -4222,6 +4351,10 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_ELU: + case GGML_UNARY_OP_FLOOR: + case GGML_UNARY_OP_CEIL: + case GGML_UNARY_OP_ROUND: + case GGML_UNARY_OP_TRUNC: #if defined (GGML_SYCL_F16) return ggml_is_contiguous(op->src[0]) && (op->type == op->src[0]->type); #else @@ -4295,6 +4428,12 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return false; } } + case GGML_OP_SET: + return (op->type == GGML_TYPE_F32) && + (op->src[0] && op->src[1]) && + (op->src[0]->type == GGML_TYPE_F32) && + (op->src[1]->type == GGML_TYPE_F32); + case GGML_OP_SET_ROWS: { return ((op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_BF16 || @@ -4393,6 +4532,8 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_DIV: case GGML_OP_REPEAT: return true; + case GGML_OP_PAD_REFLECT_1D: + return ggml_is_contiguous(op->src[0]) && op-> type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32; case GGML_OP_SQR: case GGML_OP_SQRT: case GGML_OP_SIN: @@ -4431,6 +4572,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST; case GGML_OP_SUM: case GGML_OP_SUM_ROWS: + case GGML_OP_MEAN: case GGML_OP_ARGSORT: return ggml_is_contiguous(op->src[0]); case GGML_OP_POOL_2D: @@ -4444,6 +4586,8 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_RWKV_WKV7: case GGML_OP_GATED_LINEAR_ATTN: return true; + case GGML_OP_ARANGE: + return op->type == GGML_TYPE_F32; default: return false; } diff --git a/ggml/src/ggml-sycl/pad_reflect_1d.cpp b/ggml/src/ggml-sycl/pad_reflect_1d.cpp new file mode 100644 index 0000000000000..e56655a98a106 --- /dev/null +++ b/ggml/src/ggml-sycl/pad_reflect_1d.cpp @@ -0,0 +1,72 @@ +#include "pad_reflect_1d.hpp" + +void pad_reflect_1d_f32(const float* src,float* dst, + const int64_t ne0, const int64_t ne02, const int p0, const int p1, + const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3, + const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03, + const sycl::nd_item<3> &item_ct1){ + + const int i0 = item_ct1.get_group(0) * SYCL_CONCAT_BLOCK_SIZE + item_ct1.get_local_id(0); + const int i1 = item_ct1.get_group(1); + const int g2 = item_ct1.get_group(2); + const int i2 = g2 % ne02; + const int i3 = g2 / ne02; + + if (i0 >= p0 + ne0 + p1) return; + + int t = i0 - p0; + int period = 2 * ne0 -2; + int m = t % period; + m += (m < 0) * period; + int center = 
ne0 -1; + int srci0 = center - abs(center - m); + + int offest_src = i3*nb3 + i2*nb2 + i1*nb1 + srci0*nb0; + int offest_dst = i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00; + dst[offest_dst] = src[offest_src]; + +} + +void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context& ctx, ggml_tensor* dst){ + + const ggml_tensor * src0 = dst->src[0]; + queue_ptr stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int32_t * opts = (const int32_t *) dst->op_params; + const int p0 = opts[0]; + const int p1 = opts[1]; + + const int64_t ne0 = src0->ne[0]; + + const int64_t ne00 = dst->ne[0]; + const int64_t ne01 = dst->ne[1]; + const int64_t ne02 = dst->ne[2]; + const int64_t ne03 = dst->ne[3]; + + const int64_t nb00 = dst->nb[0]; + const int64_t nb01 = dst->nb[1]; + const int64_t nb02 = dst->nb[2]; + const int64_t nb03 = dst->nb[3]; + const int64_t nb0 = src0->nb[0]; + const int64_t nb1 = src0->nb[1]; + const int64_t nb2 = src0->nb[2]; + const int64_t nb3 = src0->nb[3]; + + int num_blocks = (ne00 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE; + sycl::range<3> global(num_blocks * SYCL_CONCAT_BLOCK_SIZE, ne01, ne02*ne03); + sycl::range<3> local(SYCL_CONCAT_BLOCK_SIZE, 1, 1); + + stream->parallel_for( + sycl::nd_range<3>(global, + local), + [=](sycl::nd_item<3> item_ct1) { pad_reflect_1d_f32( + (const float *) src0->data, (float *) dst->data, + ne0, ne02, p0, p1, + nb0, nb1, nb2, nb3, + nb00, nb01, nb02, nb03 + , item_ct1); + }); +} diff --git a/ggml/src/ggml-sycl/pad_reflect_1d.hpp b/ggml/src/ggml-sycl/pad_reflect_1d.hpp new file mode 100644 index 0000000000000..a24509dea6384 --- /dev/null +++ b/ggml/src/ggml-sycl/pad_reflect_1d.hpp @@ -0,0 +1,8 @@ +#ifndef GGML_SYCL_PAD_REFLECT_1D_HPP +#define GGML_SYCL_PAD_REFLECT_1D_HPP + +#include "common.hpp" + +void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context& ctx, ggml_tensor* dst); + +#endif // GGML_SYCL_PAD_REFLECT_1D_HPP diff --git a/ggml/src/ggml-sycl/presets.hpp b/ggml/src/ggml-sycl/presets.hpp index af1890727df8f..b6517374230a8 100644 --- a/ggml/src/ggml-sycl/presets.hpp +++ b/ggml/src/ggml-sycl/presets.hpp @@ -31,6 +31,7 @@ #define SYCL_SQRT_BLOCK_SIZE 256 #define SYCL_SIN_BLOCK_SIZE 256 #define SYCL_SQR_BLOCK_SIZE 256 +#define SYCL_SET_BLOCK_SIZE 256 #define SYCL_CPY_BLOCK_SIZE 32 #define SYCL_SCALE_BLOCK_SIZE 256 #define SYCL_CLAMP_BLOCK_SIZE 256 @@ -49,6 +50,7 @@ #define SYCL_ARGMAX_BLOCK_SIZE 256 #define SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE 256 #define SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE 256 +#define SYCL_ARANGE_BLOCK_SIZE 256 // dmmv = dequantize_mul_mat_vec #ifndef GGML_SYCL_DMMV_X diff --git a/ggml/src/ggml-sycl/set.cpp b/ggml/src/ggml-sycl/set.cpp new file mode 100644 index 0000000000000..381326d230ab0 --- /dev/null +++ b/ggml/src/ggml-sycl/set.cpp @@ -0,0 +1,73 @@ +#include "presets.hpp" +#include "common.hpp" +#include "ggml.h" +#include "set.hpp" +#include +#include +using namespace sycl; + +// Internal function: perform element-wise set operation for each thread +inline void set_f32(const float* src, float* dst, + const int64_t ne0, const int64_t ne1, + const int64_t ne2, const int64_t ne3, + const int64_t nb[3], const int64_t src_nb[3], + const int64_t offset_elem, + const nd_item<1>& item) +{ + const size_t idx = item.get_global_id(0); + const size_t total = ne0 * ne1 * ne2 * ne3; + if (idx >= total) return; + + // Convert linear index to 4D indices + const size_t i3 = idx / (ne2 * ne1 * ne0); + const size_t rem = idx % (ne2 * ne1 * ne0); + const size_t i2 = rem / 
(ne1 * ne0); + const size_t rem2 = rem % (ne1 * ne0); + const size_t i1 = rem2 / ne0; + const size_t i0 = rem2 % ne0; + + // Compute source and destination indices and copy + dst[i0 + i1*nb[0] + i2*nb[1] + i3*nb[2] + offset_elem] = + src[i0 + i1*src_nb[0] + i2*src_nb[1] + i3*src_nb[2]]; +} + +// Main function: prepare GPU queue and launch parallel_for +void ggml_sycl_op_set(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + const ggml_tensor* src0 = dst->src[0]; + const ggml_tensor* src1 = dst->src[1]; + + // Ensure shapes and types are compatible + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + GGML_ASSERT(dst->type == src0->type && src0->type == src1->type && dst->type == GGML_TYPE_F32); + + const int32_t* opts = (const int32_t*) dst->op_params; + const int64_t nb[3] = {opts[0]/sizeof(float), opts[1]/sizeof(float), opts[2]/sizeof(float)}; + const int64_t offset_elem = opts[3] / sizeof(float); + const bool inplace = opts[4]; + + float* dst_ptr = (float*) dst->data; + const float* src0_ptr = (const float*) src0->data; + const float* src1_ptr = (const float*) src1->data; + + queue_ptr stream = ctx.stream(); + + // Copy src0 to dst if not inplace + if (!inplace) + stream->memcpy(dst_ptr, src0_ptr, ggml_nbytes(dst)); + + const int64_t ne[4] = {src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]}; + const int64_t src_nb[3] = {src1->nb[1]/sizeof(float), src1->nb[2]/sizeof(float), src1->nb[3]/sizeof(float)}; + + const size_t total_threads = ne[0]*ne[1]*ne[2]*ne[3]; + const size_t grid_size = ((total_threads + SYCL_SET_BLOCK_SIZE - 1) / SYCL_SET_BLOCK_SIZE) * SYCL_SET_BLOCK_SIZE; + + // Copy src0 to dst if not inplace + stream->parallel_for( + nd_range<1>(range<1>(grid_size), range<1>(SYCL_SET_BLOCK_SIZE)), + [=](nd_item<1> item) { + set_f32(src1_ptr, dst_ptr, + ne[0], ne[1], ne[2], ne[3], + nb, src_nb, offset_elem, item); } + ); +} diff --git a/ggml/src/ggml-sycl/set.hpp b/ggml/src/ggml-sycl/set.hpp new file mode 100644 index 0000000000000..657d7ac9a7b07 --- /dev/null +++ b/ggml/src/ggml-sycl/set.hpp @@ -0,0 +1,5 @@ +#pragma once +#include "backend.hpp" +#include "ggml.h" + +void ggml_sycl_op_set(ggml_backend_sycl_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1674dc66ab912..b783f7805e924 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -96,8 +96,6 @@ static bool is_pow2(uint32_t x) { return x > 1 && (x & (x-1)) == 0; } #define GGML_VK_MAX_NODES 8192 -#define MAX_VK_BUFFERS 256 - #define VK_CHECK(err, msg) \ do { \ vk::Result err_ = (err); \ @@ -385,6 +383,14 @@ enum shader_reduction_mode { static constexpr uint32_t num_argsort_pipelines = 11; static constexpr uint32_t max_argsort_cols = 1 << (num_argsort_pipelines-1); +static constexpr uint32_t num_topk_moe_pipelines = 10; + +static constexpr std::array topk_moe_norm{ GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, + GGML_OP_VIEW, GGML_OP_GET_ROWS, GGML_OP_RESHAPE, + GGML_OP_SUM_ROWS, GGML_OP_DIV, GGML_OP_RESHAPE }; +static constexpr std::array topk_moe { GGML_OP_SOFT_MAX, GGML_OP_RESHAPE, GGML_OP_ARGSORT, + GGML_OP_VIEW, GGML_OP_GET_ROWS }; + struct vk_device_struct { std::recursive_mutex mutex; @@ -582,6 +588,9 @@ struct vk_device_struct { vk_pipeline pipeline_pool2d_f32; vk_pipeline pipeline_rwkv_wkv6_f32; vk_pipeline pipeline_rwkv_wkv7_f32; + vk_pipeline pipeline_ssm_scan_f32_d128; + vk_pipeline pipeline_ssm_scan_f32_d256; + vk_pipeline 
pipeline_ssm_conv_f32; vk_pipeline pipeline_opt_step_adamw_f32; vk_pipeline pipeline_opt_step_sgd_f32; vk_pipeline pipeline_conv2d_f32[CONV_SHAPE_COUNT]; @@ -595,6 +604,9 @@ struct vk_device_struct { vk_pipeline pipeline_flash_attn_split_k_reduce; + // [2] is {!norm, norm} + vk_pipeline pipeline_topk_moe[num_topk_moe_pipelines][2]; + std::vector all_pipelines; std::vector> pinned_memory; @@ -938,6 +950,11 @@ struct vk_op_multi_add_push_constants { static_assert(MAX_PARAMETER_COUNT == 12); static_assert(sizeof(vk_op_multi_add_push_constants) <= 256); +struct vk_op_topk_moe_push_constants { + uint32_t n_rows; + uint32_t n_expert_used; +}; + struct vk_op_add_id_push_constants { uint32_t ne0; uint32_t ne1; @@ -1087,6 +1104,19 @@ struct vk_op_rwkv_wkv7_push_constants { uint32_t C; uint32_t H; }; +struct vk_op_ssm_scan_push_constants { + uint32_t nb02, nb03, nb12, nb13; + uint32_t nb21, nb22, nb31; + uint32_t nb42, nb43, nb52, nb53; + uint32_t s_off; + uint32_t n_head, d_head, n_group, n_tok; +}; +struct vk_op_ssm_conv_push_constants { + uint32_t nb01, nb02; + uint32_t nb11; + uint32_t dst_nb0, dst_nb1, dst_nb2; + uint32_t nc, ncs, nr, n_t, n_s; +}; struct vk_op_conv2d_push_constants { uint32_t Cout; @@ -1279,7 +1309,6 @@ struct ggml_vk_garbage_collector { std::vector tl_semaphores; std::vector semaphores; std::vector events; - std::vector temp_buffers; std::vector contexts; }; @@ -1450,8 +1479,6 @@ struct ggml_backend_vk_context { // and set to true after the buffer contents are consumed. bool prealloc_x_need_sync, prealloc_y_need_sync, prealloc_split_k_need_sync; - vk_buffer buffer_pool[MAX_VK_BUFFERS]; - vk_context_ref compute_ctx; vk_context_ref transfer_ctx; @@ -3591,6 +3618,16 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1); + if (device->subgroup_arithmetic && device->subgroup_require_full_support) { + ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true); + ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true); + } else { + ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size, 16}, 1, true, true); + ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true); + } + + ggml_vk_create_pipeline(device, device->pipeline_ssm_conv_f32, "ssm_conv_f32", ssm_conv_f32_len, ssm_conv_f32_data, "main", 3, sizeof(vk_op_ssm_conv_push_constants), {32, 1, 1}, {32}, 1); + ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_opt_step_sgd_f32, "opt_step_sgd_f32", 
opt_step_sgd_f32_len, opt_step_sgd_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); @@ -3701,6 +3738,11 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f16_f32, "conv2d_dw_whcn_f16_f32", conv2d_dw_whcn_f16_f32_len, conv2d_dw_whcn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f16_f32, "conv2d_dw_cwhn_f16_f32", conv2d_dw_cwhn_f16_f32_len, conv2d_dw_cwhn_f16_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1); + for (uint32_t i = 0; i < num_topk_moe_pipelines; ++i) { + ggml_vk_create_pipeline2(device, device->pipeline_topk_moe[i][0], "topk_moe_f32_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<pipeline_topk_moe[i][1], "topk_moe_f32_"+std::to_string(i), topk_moe_f32_len, topk_moe_f32_data, "main", 3, sizeof(vk_op_topk_moe_push_constants), {1, 1, 1}, {device->subgroup_size, 1u<device->pipeline_dequant_mul_mat_vec_id_f32[a_type]; } -static vk_buffer ggml_vk_pool_malloc(ggml_backend_vk_context * ctx, size_t size) { - VK_LOG_DEBUG("ggml_vk_pool_malloc(" << size << ")"); - VK_LOG_MEMORY("ggml_vk_pool_malloc"); - - int best_i = -1; - size_t best_size = std::numeric_limits::max(); //smallest unused buffer that fits our needs - int worst_i = -1; - size_t worst_size = 0; //largest unused buffer seen so far - for (int i = 0; i < MAX_VK_BUFFERS; ++i) { - vk_buffer &b = ctx->buffer_pool[i]; - if (b != nullptr && b->size >= size && b->size < best_size) { - best_i = i; - best_size = b->size; - } - if (b != nullptr && b->size > worst_size) { - worst_i = i; - worst_size = b->size; - } - } - if(best_i != -1) { - //found the smallest buffer that fits our needs - vk_buffer b = ctx->buffer_pool[best_i]; - ctx->buffer_pool[best_i].reset(); - return b; - } - if(worst_i != -1) { - //no buffer that fits our needs, resize largest one to save memory - vk_buffer& b = ctx->buffer_pool[worst_i]; - ggml_vk_destroy_buffer(b); - } - - return ggml_vk_create_buffer_device(ctx->device, size); -} - -static void ggml_vk_pool_free(ggml_backend_vk_context * ctx, vk_buffer& buffer) { - VK_LOG_DEBUG("ggml_vk_pool_free(" << buffer->size << ")"); - for (int i = 0; i < MAX_VK_BUFFERS; ++i) { - vk_buffer& b = ctx->buffer_pool[i]; - if (b == nullptr) { - b = buffer; - return; - } - } - std::cerr << "ggml_vulkan: WARNING: vk buffer pool full, increase MAX_VK_BUFFERS" << std::endl; - ggml_vk_destroy_buffer(buffer); -} - -// Returns an available temporary buffer that may only be used temporarily, it will be reused -static vk_buffer ggml_vk_create_buffer_temp(ggml_backend_vk_context * ctx, size_t size) { - // Try to find existing temp buffer with enough capacity - for (auto& buffer : ctx->gc.temp_buffers) { - if (buffer->size >= size) { - return buffer; - } - } - - VK_LOG_MEMORY("ggml_vk_create_buffer_temp(" << size << ")"); - - // Otherwise create new buffer - vk_buffer buf = ggml_vk_pool_malloc(ctx, size); - ctx->gc.temp_buffers.push_back(buf); - - return buf; -} - static void * ggml_vk_host_malloc(vk_device& device, size_t size) { VK_LOG_MEMORY("ggml_vk_host_malloc(" << size << ")"); vk_buffer buf = ggml_vk_create_buffer(device, size, @@ -7983,6 +7968,13 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); 
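+            // Fused top-k MoE path: when ggml_vk_can_fuse_topk_moe() matched this SOFT_MAX node
+            // (num_additional_fused_ops > 0), pick pipeline_topk_moe[ceil(log2(n_expert))][with_norm]
+            // instead of the regular soft_max pipelines below.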
GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32); + if (ctx->num_additional_fused_ops) { + uint32_t idx = (uint32_t)ceilf(log2f(float(dst->ne[0]))); + GGML_ASSERT(idx < num_topk_moe_pipelines); + bool with_norm = ctx->num_additional_fused_ops == topk_moe_norm.size() - 1; + return ctx->device->pipeline_topk_moe[idx][with_norm]; + } + if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) { return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_wg512 : ctx->device->pipeline_soft_max_f32; } @@ -8098,6 +8090,21 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_rwkv_wkv7_f32; } return nullptr; + case GGML_OP_SSM_SCAN: + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + const uint32_t d_state = src0->ne[0]; + if (d_state == 128) { + return ctx->device->pipeline_ssm_scan_f32_d128; + } else if (d_state == 256) { + return ctx->device->pipeline_ssm_scan_f32_d256; + } + } + return nullptr; + case GGML_OP_SSM_CONV: + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + return ctx->device->pipeline_ssm_conv_f32; + } + return nullptr; case GGML_OP_OPT_STEP_ADAMW: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { return ctx->device->pipeline_opt_step_adamw_f32; @@ -8592,6 +8599,14 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co } } break; + case GGML_OP_SSM_CONV: + { + const uint32_t nr = src0->ne[1]; + const uint32_t n_t = dst->ne[1]; + const uint32_t n_s = dst->ne[2]; + elements = { nr, n_t, n_s }; + } + break; default: elements = { (uint32_t)ggml_nelements(src0), 1, 1 }; break; @@ -9038,6 +9053,117 @@ static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx, ); } +static void ggml_vk_ssm_scan(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + const ggml_tensor * src3 = dst->src[3]; + const ggml_tensor * src4 = dst->src[4]; + const ggml_tensor * src5 = dst->src[5]; + + GGML_ASSERT(dst->buffer != nullptr); + + const uint32_t head_dim = src0->ne[1]; + const uint32_t n_head = src1->ne[1]; + const uint32_t n_group = src4->ne[1]; + const uint32_t n_tok = src1->ne[2]; + const uint32_t n_seq = src1->ne[3]; + + bool is_mamba2 = (src3->nb[1] == sizeof(float)); + GGML_ASSERT(is_mamba2); + + vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, dst->op); + GGML_ASSERT(pipeline != nullptr); + + if (dryrun) { + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); + return; + } + + const int64_t s_off = ggml_nelements(src1) * sizeof(float); + + const vk_op_ssm_scan_push_constants pc = { + (uint32_t)src0->nb[2], (uint32_t)src0->nb[3], + (uint32_t)src1->nb[2], (uint32_t)src1->nb[3], + (uint32_t)src2->nb[1], (uint32_t)src2->nb[2], + (uint32_t)src3->nb[1], + (uint32_t)src4->nb[2], (uint32_t)src4->nb[3], + (uint32_t)src5->nb[2], (uint32_t)src5->nb[3], + (uint32_t)s_off, + n_head, head_dim, n_group, n_tok + }; + + ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context; + ggml_backend_vk_buffer_context * src_buf_ctxs[GGML_MAX_SRC]; + for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) { + src_buf_ctxs[i] = (ggml_backend_vk_buffer_context *)dst->src[i]->buffer->context; + } + + vk_buffer d_D = nullptr, d_srcs[GGML_MAX_SRC] = { nullptr }; + 
size_t dst_offset = 0, src_offsets[GGML_MAX_SRC] = { 0 }; + bool dst_uma = false, srcs_uma[GGML_MAX_SRC] = { false }; + + if (ctx->device->uma) { + for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) { + ggml_vk_host_get(ctx->device, dst->src[i]->data, d_srcs[i], src_offsets[i]); + srcs_uma[i] = d_srcs[i] != nullptr; + } + ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset); + dst_uma = d_D != nullptr; + } + + if (!dst_uma) { + d_D = dst_buf_ctx->dev_buffer; + dst_offset = vk_tensor_offset(dst) + dst->view_offs; + } + for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) { + if (!srcs_uma[i]) { + d_srcs[i] = src_buf_ctxs[i]->dev_buffer; + src_offsets[i] = vk_tensor_offset(dst->src[i]) + dst->src[i]->view_offs; + } + } + + size_t dst_size = ggml_nbytes(dst); + size_t src_sizes[GGML_MAX_SRC]; + for (int i = 0; i < GGML_MAX_SRC && dst->src[i] != nullptr; i++) { + src_sizes[i] = ggml_nbytes(dst->src[i]); + } + + std::array elements; + + const int splitH = 16; + const uint32_t num_workgroups_x = CEIL_DIV(n_head * head_dim, splitH); + const uint32_t num_workgroups_y = n_seq; + elements = { num_workgroups_x, num_workgroups_y, 1 }; + + ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { + vk_subbuffer{ d_srcs[0], src_offsets[0], src_sizes[0] }, + vk_subbuffer{ d_srcs[1], src_offsets[1], src_sizes[1] }, + vk_subbuffer{ d_srcs[2], src_offsets[2], src_sizes[2] }, + vk_subbuffer{ d_srcs[3], src_offsets[3], src_sizes[3] }, + vk_subbuffer{ d_srcs[4], src_offsets[4], src_sizes[4] }, + vk_subbuffer{ d_srcs[5], src_offsets[5], src_sizes[5] }, + vk_subbuffer{ d_srcs[6], src_offsets[6], src_sizes[6] }, + vk_subbuffer{ d_D, dst_offset, dst_size } + }, pc, elements); +} + +static void ggml_vk_ssm_conv(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SSM_CONV, { + (uint32_t)src0->nb[1], (uint32_t)src0->nb[2], + (uint32_t)src1->nb[1], + (uint32_t)dst->nb[0], (uint32_t)dst->nb[1], (uint32_t)dst->nb[2], + (uint32_t)src1->ne[0], + (uint32_t)src0->ne[0], + (uint32_t)src0->ne[1], + (uint32_t)dst->ne[1], + (uint32_t)dst->ne[2], + }, dryrun); +} + static void ggml_vk_op_f32_opt_step_adamw(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_push_constants&& pc, bool dryrun = false) { const ggml_tensor * x = dst->src[0]; const ggml_tensor * g = dst->src[1]; @@ -9434,6 +9560,87 @@ static void ggml_vk_soft_max_back(ggml_backend_vk_context * ctx, vk_context& sub ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_SOFT_MAX_BACK, { (uint32_t)src0->ne[0], (uint32_t)ggml_nrows(src0), op_params[0], op_params[1] }, dryrun); } +static void ggml_vk_topk_moe(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_cgraph * cgraph, int node_idx, bool dryrun = false) { + + bool with_norm = ctx->num_additional_fused_ops == topk_moe_norm.size() - 1; + ggml_tensor * logits = cgraph->nodes[node_idx + 0]->src[0]; + ggml_tensor * weights = with_norm ? 
cgraph->nodes[node_idx + 8] : cgraph->nodes[node_idx + 4]; + ggml_tensor * ids = cgraph->nodes[node_idx + 3]; + + GGML_ASSERT(logits->type == GGML_TYPE_F32); + GGML_ASSERT(weights->type == GGML_TYPE_F32); + GGML_ASSERT(ids->type == GGML_TYPE_I32); + + const int n_experts = logits->ne[0]; + const int n_rows = logits->ne[1]; + const int n_expert_used = weights->ne[1]; + + GGML_ASSERT(ids->nb[1] / ggml_type_size(ids->type) == (size_t) n_experts); + + vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, nullptr, nullptr, nullptr, cgraph->nodes[node_idx], GGML_OP_SOFT_MAX); + + if (dryrun) { + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); + return; + } + + ggml_backend_vk_buffer_context * logits_buf_ctx = (ggml_backend_vk_buffer_context *)logits->buffer->context; + ggml_backend_vk_buffer_context * weights_buf_ctx = (ggml_backend_vk_buffer_context *)weights->buffer->context; + ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context; + + vk_buffer d_logits = nullptr; + size_t logits_buf_offset = 0; + vk_buffer d_weights = nullptr; + size_t weights_buf_offset = 0; + vk_buffer d_ids = nullptr; + size_t ids_buf_offset = 0; + + bool logits_uma = false; + bool weights_uma = false; + bool ids_uma = false; + + if (ctx->device->uma) { + ggml_vk_host_get(ctx->device, logits->data, d_logits, logits_buf_offset); + ggml_vk_host_get(ctx->device, weights->data, d_weights, weights_buf_offset); + ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset); + logits_uma = d_logits != nullptr; + weights_uma = d_weights != nullptr; + ids_uma = d_ids != nullptr; + } + + if (!logits_uma) { + d_logits = logits_buf_ctx->dev_buffer; + logits_buf_offset = vk_tensor_offset(logits) + logits->view_offs; + GGML_ASSERT(d_logits != nullptr); + } + if (!weights_uma) { + d_weights = weights_buf_ctx->dev_buffer; + weights_buf_offset = vk_tensor_offset(weights) + weights->view_offs; + GGML_ASSERT(d_weights != nullptr); + } + if (!ids_uma) { + d_ids = ids_buf_ctx->dev_buffer; + ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs; + GGML_ASSERT(d_ids != nullptr); + } + + vk_op_topk_moe_push_constants pc; + pc.n_rows = n_rows; + pc.n_expert_used = n_expert_used; + + GGML_ASSERT(n_expert_used <= n_experts); + + const uint32_t rows_per_block = 4; + std::array elements = { CEIL_DIV(n_rows, rows_per_block), 1, 1 }; + + ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, + { + ggml_vk_subbuffer(ctx, d_logits, logits_buf_offset), + ggml_vk_subbuffer(ctx, d_weights, weights_buf_offset), + ggml_vk_subbuffer(ctx, d_ids, ids_buf_offset), + }, pc, elements); +} + static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool backprop, bool dryrun = false) { const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; @@ -10870,6 +11077,8 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_CONV_2D_DW: case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: + case GGML_OP_SSM_SCAN: + case GGML_OP_SSM_CONV: case GGML_OP_LEAKY_RELU: case GGML_OP_FLASH_ATTN_EXT: case GGML_OP_OPT_STEP_ADAMW: @@ -11017,11 +11226,11 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr ctx->unsynced_nodes_read.clear(); ggml_vk_sync_buffers(ctx, compute_ctx); } - // Add the last fused node and all fused source nodes to the unsynchronized list. 
- const ggml_tensor * last_node = cgraph->nodes[node_idx + ctx->num_additional_fused_ops]; - ctx->unsynced_nodes_written.push_back(last_node); + // Add all fused nodes to the unsynchronized lists. for (int32_t i = 0; i < ctx->num_additional_fused_ops + 1; ++i) { const ggml_tensor *cur_node = cgraph->nodes[node_idx + i]; + // Multiple outputs could be written, e.g. in topk_moe. Add them all to the list. + ctx->unsynced_nodes_written.push_back(cur_node); for (uint32_t j = 0; j < GGML_MAX_SRC; ++j) { if (!cur_node->src[j]) { continue; @@ -11188,7 +11397,11 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr break; case GGML_OP_SOFT_MAX: - ggml_vk_soft_max(ctx, compute_ctx, src0, src1, src2, node, dryrun); + if (ctx->num_additional_fused_ops) { + ggml_vk_topk_moe(ctx, compute_ctx, cgraph, node_idx, dryrun); + } else { + ggml_vk_soft_max(ctx, compute_ctx, src0, src1, src2, node, dryrun); + } break; case GGML_OP_SOFT_MAX_BACK: @@ -11287,6 +11500,16 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr break; + case GGML_OP_SSM_SCAN: + ggml_vk_ssm_scan(ctx, compute_ctx, node, dryrun); + + break; + + case GGML_OP_SSM_CONV: + ggml_vk_ssm_conv(ctx, compute_ctx, node, dryrun); + + break; + case GGML_OP_OPT_STEP_ADAMW: ggml_vk_opt_step_adamw(ctx, compute_ctx, node, dryrun); @@ -11398,6 +11621,8 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_OP_CONV_2D_DW: case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: + case GGML_OP_SSM_SCAN: + case GGML_OP_SSM_CONV: case GGML_OP_LEAKY_RELU: case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: @@ -11507,10 +11732,6 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * // Clean up after graph processing is done static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) { VK_LOG_DEBUG("ggml_vk_graph_cleanup()"); - for (auto& buffer : ctx->gc.temp_buffers) { - ggml_vk_pool_free(ctx, buffer); - } - ctx->gc.temp_buffers.clear(); ctx->prealloc_y_last_pipeline_used = {}; ctx->unsynced_nodes_written.clear(); @@ -11553,10 +11774,6 @@ static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) { ggml_vk_destroy_buffer(ctx->prealloc_split_k); ctx->prealloc_y_last_pipeline_used = nullptr; - for (auto& buffer : ctx->buffer_pool) { - ggml_vk_destroy_buffer(buffer); - } - ctx->prealloc_size_x = 0; ctx->prealloc_size_y = 0; ctx->prealloc_size_split_k = 0; @@ -11972,6 +12189,120 @@ static bool ggml_vk_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, st return true; } +static bool ggml_vk_can_fuse_topk_moe(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, + int node_idx, bool with_norm) { + + if (with_norm) { + if (node_idx + (int)topk_moe_norm.size() > cgraph->n_nodes) { + return false; + } + for (size_t i = 0; i < topk_moe_norm.size(); ++i) { + if (cgraph->nodes[node_idx + i]->op != topk_moe_norm[i]) { + return false; + } + } + } else { + if (node_idx + (int)topk_moe.size() > cgraph->n_nodes) { + return false; + } + for (size_t i = 0; i < topk_moe.size(); ++i) { + if (cgraph->nodes[node_idx + i]->op != topk_moe[i]) { + return false; + } + } + } + + const ggml_tensor * softmax = cgraph->nodes[node_idx + 0]; + const ggml_tensor * weights = with_norm ? 
cgraph->nodes[node_idx + 8] : cgraph->nodes[node_idx + 4]; + + const float * op_params = (const float *)softmax->op_params; + + float scale = op_params[0]; + float max_bias = op_params[1]; + + if (!ggml_is_contiguous(softmax->src[0]) || !ggml_is_contiguous(weights)) { + return false; + } + + if (scale != 1.0f || max_bias != 0.0f) { + return false; + } + + // don't fuse when masks or sinks are present + if (softmax->src[1] || softmax->src[2]) { + return false; + } + + const int n_expert = softmax->ne[0]; + // n_expert must be a power of 2 + if (!is_pow2(n_expert) || n_expert > (1 << (num_topk_moe_pipelines-1))) { + return false; + } + + // Check that the nodes don't have any unexpected uses + const ggml_tensor * reshape1 = cgraph->nodes[node_idx + 1]; + const ggml_tensor * argsort = cgraph->nodes[node_idx + 2]; + const ggml_tensor * view = cgraph->nodes[node_idx + 3]; + const ggml_tensor * get_rows = cgraph->nodes[node_idx + 4]; + const ggml_tensor * reshape5 = with_norm ? cgraph->nodes[node_idx + 5] : nullptr; + const ggml_tensor * sum_rows = with_norm ? cgraph->nodes[node_idx + 6] : nullptr; + const ggml_tensor * div = with_norm ? cgraph->nodes[node_idx + 7] : nullptr; + const ggml_tensor * reshape8 = with_norm ? cgraph->nodes[node_idx + 8] : nullptr; + + // softmax is used by reshape and argsort + if (ggml_node_get_use_count(cgraph, node_idx) != 2 || + reshape1->src[0] != softmax || + argsort->src[0] != softmax) { + return false; + } + // reshape is used by get_rows + if (ggml_node_get_use_count(cgraph, node_idx + 1) != 1 || + get_rows->src[0] != reshape1) { + return false; + } + // argsort is used by view + if (ggml_node_get_use_count(cgraph, node_idx + 2) != 1 || + view->src[0] != argsort) { + return false; + } + // view is written (via argsort), we can skip checking it + + if (with_norm) { + // get_rows is used by reshape + if (ggml_node_get_use_count(cgraph, node_idx + 4) != 1 || + reshape5->src[0] != get_rows) { + return false; + } + + // reshape is used by sum_rows and div + if (ggml_node_get_use_count(cgraph, node_idx + 5) != 2 || + sum_rows->src[0] != reshape5 || + div->src[0] != reshape5) { + return false; + } + + // sum_rows is used by div + if (ggml_node_get_use_count(cgraph, node_idx + 6) != 1 || + div->src[1] != sum_rows) { + return false; + } + + // div/reshape are written + if (reshape8->src[0] != div) { + return false; + } + } + + if (!ctx->device->subgroup_arithmetic || + !ctx->device->subgroup_shuffle || + !ctx->device->subgroup_require_full_support || + ctx->device->disable_fusion) { + return false; + } + + return true; +} + static uint32_t ggml_vk_fuse_multi_add(ggml_backend_vk_context * ctx, const struct ggml_cgraph * cgraph, int node_idx) { const ggml_tensor *first_node = cgraph->nodes[node_idx]; @@ -12047,6 +12378,10 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ctx->num_additional_fused_ops = num_adds - 1; } else if (ggml_vk_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) { ctx->num_additional_fused_ops = 1; + } else if (ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, true)) { + ctx->num_additional_fused_ops = topk_moe_norm.size() - 1; + } else if (ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, false)) { + ctx->num_additional_fused_ops = topk_moe.size() - 1; } } ggml_vk_build_graph(ctx, cgraph, i, nullptr, 0, true, false, false, false); @@ -12144,6 +12479,10 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ctx->num_additional_fused_ops = num_adds - 1; } else if (ggml_vk_can_fuse(cgraph, i, { 
GGML_OP_RMS_NORM, GGML_OP_MUL })) { ctx->num_additional_fused_ops = 1; + } else if (ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, true)) { + ctx->num_additional_fused_ops = topk_moe_norm.size() - 1; + } else if (ggml_vk_can_fuse_topk_moe(ctx, cgraph, i, false)) { + ctx->num_additional_fused_ops = topk_moe.size() - 1; } } @@ -12151,10 +12490,10 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg bool almost_ready = (cgraph->n_nodes - i) < cgraph->n_nodes / 5; bool submit = (submitted_nodes >= nodes_per_submit) || (mul_mat_bytes >= mul_mat_bytes_per_submit) || - (i + ctx->num_additional_fused_ops == last_node) || + (i + ctx->num_additional_fused_ops >= last_node) || (almost_ready && !ctx->almost_ready_fence_pending); - bool enqueued = ggml_vk_build_graph(ctx, cgraph, i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i + ctx->num_additional_fused_ops == last_node, almost_ready, submit); + bool enqueued = ggml_vk_build_graph(ctx, cgraph, i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i + ctx->num_additional_fused_ops >= last_node, almost_ready, submit); if (vk_perf_logger_enabled) { if (ctx->compute_ctx.expired()) { @@ -12275,6 +12614,25 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * while (first_unused < graph->n_nodes) { std::vector current_set; + // Avoid reordering topk_moe_norm + if (first_unused + (int)topk_moe_norm.size() <= graph->n_nodes) { + bool is_topk_moe_norm = true; + for (size_t j = 0; j < topk_moe_norm.size(); ++j) { + if (graph->nodes[first_unused + j]->op != topk_moe_norm[j] || used[first_unused + j]) { + is_topk_moe_norm = false; + } + } + if (is_topk_moe_norm) { + for (size_t j = 0; j < topk_moe_norm.size(); ++j) { + new_order.push_back(graph->nodes[first_unused + j]); + used[first_unused + j] = true; + } + while (first_unused < graph->n_nodes && used[first_unused]) { + first_unused++; + } + continue; + } + } // First, grab the next unused node. 
current_set.push_back(first_unused); @@ -12879,6 +13237,47 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: return true; + case GGML_OP_SSM_SCAN: + { + for (int i = 0; i < 6; i++) { + if (op->src[i] && ggml_is_quantized(op->src[i]->type)) { + return false; + } + } + if (op->src[6] && op->src[6]->type != GGML_TYPE_I32) { + return false; + } + if (op->src[0]->type != GGML_TYPE_F32 || op->type != GGML_TYPE_F32) { + return false; + } + + const uint32_t d_state = op->src[0]->ne[0]; + const uint32_t head_dim = op->src[0]->ne[1]; + + bool is_mamba2 = (op->src[3] && op->src[3]->nb[1] == sizeof(float)); + if (!is_mamba2) { + return false; + } + + if ((d_state != 128 && d_state != 256) || head_dim % 16 != 0) { + return false; + } + + ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context; + const vk_device& device = ggml_vk_get_device(ctx->device); + + const uint32_t SPLIT_H = 16; + + size_t stateC_size = SPLIT_H * d_state * sizeof(float); + + if (stateC_size > device->properties.limits.maxComputeSharedMemorySize) { + return false; + } + + return true; + } + case GGML_OP_SSM_CONV: + return true; case GGML_OP_CONV_TRANSPOSE_1D: return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; case GGML_OP_CONV_2D: @@ -13223,14 +13622,14 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * struct ggml_context * ggml_ctx = ggml_init(iparams); - std::array src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}; - std::array src_size = {0, 0, 0, 0, 0, 0}; - std::array src_buffer = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}; - const char * srci_name[6] = {"src0", "src1", "src2", "src3", "src4", "src5"}; + std::array src_clone = {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}; + std::array src_size = {}; + std::array src_buffer = {}; + const char * srci_name[GGML_MAX_SRC] = {"src0", "src1", "src2", "src3", "src4", "src5", "src6", "src7", "src8", "src9"}; struct ggml_tensor * tensor_clone = nullptr; - for (int i = 0; i < 6; i++) { + for (int i = 0; i < GGML_MAX_SRC; i++) { ggml_tensor * srci = tensor->src[i]; if (fused_rms_norm_mul) { rms_norm_idx = tensor->src[0]->op == GGML_OP_RMS_NORM ? 
0 : 1; @@ -13537,6 +13936,11 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * src_clone[2]); } else if (tensor->op == GGML_OP_ADD_ID) { tensor_clone = ggml_add_id(ggml_ctx, src_clone[0], src_clone[1], src_clone[2]); + } else if (tensor->op == GGML_OP_SSM_SCAN) { + tensor_clone = ggml_ssm_scan(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], + src_clone[3], src_clone[4], src_clone[5], src_clone[6]); + } else if (tensor->op == GGML_OP_SSM_CONV) { + tensor_clone = ggml_ssm_conv(ggml_ctx, src_clone[0], src_clone[1]); } else { std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl; @@ -13558,7 +13962,7 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * memcpy(comp_result, tensor_clone->data, comp_size); memcpy(comp_nb, tensor_clone->nb, sizeof(size_t) * GGML_MAX_DIMS); - for (int i = 0; i < 6; i++) { + for (int i = 0; i < GGML_MAX_SRC; i++) { if (src_buffer[i] != nullptr) { free(src_buffer[i]); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp index 62acbf107a298..2255f9c168e6e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp @@ -345,7 +345,7 @@ void main() { float Lfrcp[Br]; [[unroll]] for (uint32_t r = 0; r < Br; ++r) { - Lfrcp[r] = 1.0 / Lf[r]; + Lfrcp[r] = (Lf[r] == 0.0) ? 0.0 : (1.0 / Lf[r]); } [[unroll]] for (uint32_t d = 0; d < HSV_per_thread / 4; ++d) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp index 2066a05b34902..8699fa6c9cbb7 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp @@ -380,7 +380,7 @@ void main() { float Lfrcp[rows_per_thread]; [[unroll]] for (uint32_t r = 0; r < rows_per_thread; ++r) { - Lfrcp[r] = 1.0 / Lf[r]; + Lfrcp[r] = (Lf[r] == 0.0) ? 0.0 : (1.0 / Lf[r]); } [[unroll]] for (uint32_t d = 0; d < HSV_per_thread / 4; ++d) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index 910da1ab0c28f..fcfc60a878544 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -121,7 +121,11 @@ void main() { const float NEG_FLT_MAX_OVER_2 = uintBitsToFloat(0xFEFFFFFF); L = coopmat(0); +#if defined(ACC_TYPE_MAX) + M = coopmat(-ACC_TYPE_MAX / ACC_TYPE(2)); +#else M = coopmat(NEG_FLT_MAX_OVER_2); +#endif coopmat slopeMat = coopmat(1.0); @@ -294,7 +298,7 @@ void main() { [[unroll]] for (int k = 0; k < Ldiag.length(); ++k) { - Ldiag[k] = ACC_TYPE(1.0) / Ldiag[k]; + Ldiag[k] = (Ldiag[k] == 0.0) ? ACC_TYPE(0.0) : (ACC_TYPE(1.0) / Ldiag[k]); } O = Ldiag*O; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp index 06e83822fe326..4eaddd31a8f58 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp @@ -91,7 +91,7 @@ void main() { L = L*ms + vs; } - L = 1.0 / L; + L = (L == 0.0) ? 
0.0 : 1.0 / L; // D dimension is split across workgroups in the y dimension uint d = tid + gl_WorkGroupID.y * BLOCK_SIZE; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp new file mode 100644 index 0000000000000..d62696bcfaecf --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp @@ -0,0 +1,44 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require + +#include "types.glsl" + +layout(constant_id = 0) const uint BLOCK_SIZE = 32; + +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0) readonly buffer Src0 { float src0[]; }; +layout(binding = 1) readonly buffer Src1 { float src1[]; }; +layout(binding = 2) buffer Dst { float dst[]; }; + +layout(push_constant) uniform PushConstants { + uint nb01; uint nb02; + uint nb11; + uint dst_nb0; uint dst_nb1; uint dst_nb2; + uint nc; uint ncs; uint nr; uint n_t; uint n_s; +}; + +void main() { + const uint global_thread_id = gl_GlobalInvocationID.x; + const uint i2 = gl_WorkGroupID.y; + const uint i3 = gl_WorkGroupID.z; + + if (global_thread_id >= nr || i2 >= n_t || i3 >= n_s) { + return; + } + + const uint i1 = global_thread_id; + const uint src0_base = i3 * (nb02 / 4) + i2 + i1 * (nb01 / 4); + const uint src1_base = i1 * (nb11 / 4); + const uint dst_idx = i3 * (dst_nb2 / 4) + i2 * (dst_nb1 / 4) + i1; + + float sum = 0.0; + [[unroll]] for (uint i0 = 0; i0 < nc; i0++) { + const uint src0_idx = src0_base + i0; + const uint src1_idx = src1_base + i0; + sum += src0[src0_idx] * src1[src1_idx]; + } + + dst[dst_idx] = sum; +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp new file mode 100644 index 0000000000000..8f67be9799518 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp @@ -0,0 +1,140 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require +#if USE_SUBGROUP_ADD +#extension GL_KHR_shader_subgroup_arithmetic : enable +#endif + +#include "types.glsl" + +layout(constant_id = 0) const uint D_STATE = 128; +layout(constant_id = 1) const uint SUBGROUP_SIZE = 32; +layout(constant_id = 2) const uint SPLIT_H = 16; + +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0) readonly buffer Src0 { float s0[]; }; +layout(binding = 1) readonly buffer Src1 { float x[]; }; +layout(binding = 2) readonly buffer Src2 { float dt[]; }; +layout(binding = 3) readonly buffer Src3 { float A[]; }; +layout(binding = 4) readonly buffer Src4 { float B[]; }; +layout(binding = 5) readonly buffer Src5 { float C[]; }; +layout(binding = 6) readonly buffer Src6 { int ids[]; }; +layout(binding = 7) buffer Dst { float d[]; }; + +layout(push_constant) uniform PushConstants { + uint nb02; uint nb03; uint nb12; uint nb13; + uint nb21; uint nb22; uint nb31; + uint nb42; uint nb43; uint nb52; uint nb53; + uint s_off; + uint n_head; + uint d_head; + uint n_group; + uint n_tok; +}; + +float softplus(float x) { + if (x <= 20.0) { + return log(1.0 + exp(x)); + } else { + return x; + } +} + +shared float stateC[SPLIT_H * D_STATE]; + +void main() { + const uint tid = gl_LocalInvocationID.x; + const uint head_idx = (gl_WorkGroupID.x * SPLIT_H) / d_head; + const uint head_off = ((gl_WorkGroupID.x * SPLIT_H) % d_head) * 4; + const uint seq_idx = gl_WorkGroupID.y; + + const uint group_off = (head_idx / (n_head / n_group)) * D_STATE * 4; + const uint s0_base_idx = (uint(ids[seq_idx]) * nb03 + head_idx * nb02 + head_off * D_STATE) / 4; + 
const uint x_base_idx = (seq_idx * nb13 + gl_WorkGroupID.x * SPLIT_H * 4) / 4; + const uint dt_base_idx = (seq_idx * nb22 + head_idx * 4) / 4; + const uint A_base_idx = (head_idx * nb31) / 4; + const uint B_base_idx = (seq_idx * nb43 + group_off) / 4; + const uint C_base_idx = (seq_idx * nb53 + group_off) / 4; + const uint y_base_idx = seq_idx * n_tok * n_head * d_head + gl_WorkGroupID.x * SPLIT_H; + const uint s_base_idx = (s_off + seq_idx * nb03 + head_idx * nb02 + head_off * D_STATE) / 4; + + const uint stride_x = nb12 / 4; + const uint stride_dt = nb21 / 4; + const uint stride_B = nb42 / 4; + const uint stride_C = nb52 / 4; + const uint stride_y = n_head * d_head; + + float state[SPLIT_H]; + [[unroll]] for (uint j = 0; j < SPLIT_H; j++) { + state[j] = s0[s0_base_idx + j * D_STATE + tid]; + } + + for (uint i = 0; i < n_tok; i++) { + const float dt_soft_plus = softplus(dt[dt_base_idx + i * stride_dt]); + + const float dA = exp(dt_soft_plus * A[A_base_idx]); + + const float B_val = B[B_base_idx + i * stride_B + tid]; + const float C_val = C[C_base_idx + i * stride_C + tid]; + + [[unroll]] for (uint j = 0; j < SPLIT_H; j++) { + const float x_dt = x[x_base_idx + i * stride_x + j] * dt_soft_plus; + + state[j] = (state[j] * dA) + (B_val * x_dt); + + stateC[j * D_STATE + tid] = state[j] * C_val; + } + + barrier(); + [[unroll]] + for (uint w = D_STATE / 2; w >= SUBGROUP_SIZE; w >>= 1) { + [[unroll]] for (uint j = 0; j < (w * SPLIT_H + D_STATE - 1) / D_STATE; j++) { + const uint k = (tid % w) + (D_STATE * (tid / w)) + j * D_STATE * (D_STATE / w); + if (k < SPLIT_H * D_STATE && (k + w) < SPLIT_H * D_STATE) { + stateC[k] += stateC[k + w]; + } + } + barrier(); + } + + [[unroll]] for (uint j = 0; j < max(1, SPLIT_H / (D_STATE / SUBGROUP_SIZE)); j++) { + const uint idx = (tid % SUBGROUP_SIZE) + + D_STATE * (tid / SUBGROUP_SIZE) + + j * D_STATE * (D_STATE / SUBGROUP_SIZE); + const uint max_idx = SUBGROUP_SIZE - 1 + + D_STATE * ((D_STATE - 1) / SUBGROUP_SIZE) + + j * D_STATE * (D_STATE / SUBGROUP_SIZE); + + if (idx < SPLIT_H * D_STATE || + max_idx < SPLIT_H * D_STATE) { + float sc; +#if USE_SUBGROUP_ADD + sc = stateC[idx]; + sc = subgroupAdd(sc); +#else + [[unroll]] for (uint offset = SUBGROUP_SIZE / 2; offset > 0; offset >>= 1) { + if (idx + offset < SPLIT_H * D_STATE) { + stateC[idx] += stateC[idx + offset]; + } + barrier(); + } + if (tid % SUBGROUP_SIZE == 0) { + sc = stateC[idx]; + } +#endif + + if (tid % SUBGROUP_SIZE == 0) { + const uint k = tid / SUBGROUP_SIZE + j * (D_STATE / SUBGROUP_SIZE); + d[y_base_idx + i * stride_y + k] = sc; + } + } + } + + barrier(); + } + + [[unroll]] for (uint j = 0; j < SPLIT_H; j++) { + d[s_base_idx + j * D_STATE + tid] = state[j]; + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp b/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp new file mode 100644 index 0000000000000..9e56d5f8a3cc1 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp @@ -0,0 +1,139 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require +#extension GL_KHR_shader_subgroup_basic : enable +#extension GL_KHR_shader_subgroup_arithmetic : enable +#extension GL_KHR_shader_subgroup_shuffle : enable + +#include "types.glsl" + +layout (push_constant) uniform parameter +{ + uint n_rows; + uint n_expert_used; +}; + +layout(local_size_x_id = 0, local_size_y = 4, local_size_z = 1) in; + +layout(constant_id = 0) const uint WARP_SIZE = 32; +layout(constant_id = 1) const uint n_experts = 512; +layout(constant_id = 2) const bool with_norm = true; + +const uint 
experts_per_thread = (n_experts > WARP_SIZE) ? n_experts / WARP_SIZE : 1; + +layout (binding = 0, std430) readonly buffer Logits {float logits[];}; +layout (binding = 1, std430) writeonly buffer Weights {float weights[];}; +layout (binding = 2, std430) writeonly buffer Ids {uint ids[];}; + +void main() { + const uint row = gl_WorkGroupID.x * gl_WorkGroupSize.y + gl_LocalInvocationID.y; + if (row >= n_rows) { + return; + } + + const uint logits_offset = n_experts * row; + const uint weights_offset = n_expert_used * row; + const uint ids_offset = n_experts * row; + + float logits_r[experts_per_thread]; + + const float INFINITY = 1.0 / 0.0; + + [[unroll]] + for (uint i = 0; i < n_experts; i += WARP_SIZE) { + const uint expert = i + gl_LocalInvocationID.x; + logits_r[i / WARP_SIZE] = n_experts % WARP_SIZE == 0 || expert < n_experts ? logits[logits_offset + expert] : -INFINITY; + } + + float max_val = logits_r[0]; + + [[unroll]] + for (int i = 1; i < experts_per_thread; i++) { + const float val = logits_r[i]; + max_val = max(val, max_val); + } + + max_val = subgroupMax(max_val); + + float wt[experts_per_thread]; + float tmp = 0.f; + + [[unroll]] + for (int i = 0; i < experts_per_thread; i++) { + const float val = logits_r[i]; + wt[i] = exp(val - max_val); + tmp += wt[i]; + } + + tmp = subgroupAdd(tmp); + + const float inv_sum = 1.0f / tmp; + + [[unroll]] + for (int i = 0; i < experts_per_thread; i++) { + wt[i] = wt[i] * inv_sum; + } + + // at this point, each thread holds a portion of softmax, + // we do the argmax reduce over n_expert_used, each time marking + // the expert weight as -inf to exclude from the next iteration + + float wt_sum = 0.f; + + float output_weights[experts_per_thread]; + + for (int k = 0; k < n_expert_used; k++) { + float max_val = wt[0]; + uint max_expert = gl_LocalInvocationID.x; + + [[unroll]] + for (int i = 1; i < experts_per_thread; i++) { + const uint expert = gl_LocalInvocationID.x + i * WARP_SIZE; + if ((n_experts % WARP_SIZE == 0 || expert < n_experts) && wt[i] > max_val) { + max_val = wt[i]; + max_expert = expert; + } + } + + [[unroll]] + for (uint mask = WARP_SIZE / 2; mask > 0; mask /= 2) { + const float val = subgroupShuffleXor(max_val, mask); + const uint expert = subgroupShuffleXor(max_expert, mask); + if (val > max_val || (val == max_val && expert < max_expert)) { + max_val = val; + max_expert = expert; + } + } + + if ((k & (WARP_SIZE - 1)) == gl_LocalInvocationID.x) { + output_weights[k / WARP_SIZE] = max_val; + } + + if ((max_expert & (WARP_SIZE - 1)) == gl_LocalInvocationID.x) { + wt[max_expert / WARP_SIZE] = -INFINITY; + + ids[ids_offset + k] = max_expert; + if (with_norm) { + wt_sum += max_val; + } + } + } + + if (with_norm) { + wt_sum = subgroupAdd(wt_sum); + const float inv_sum = 1.0f / wt_sum; + + [[unroll]] + for (uint i = 0; i < experts_per_thread; ++i) { + output_weights[i] *= inv_sum; + } + } + + [[unroll]] + for (uint i = 0; i < experts_per_thread; ++i) { + uint idx = i * WARP_SIZE + gl_LocalInvocationID.x; + if (idx < n_expert_used) { + weights[weights_offset + idx] = output_weights[i]; + } + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 184f3f3a7db51..0f25ba3453093 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -916,6 +916,13 @@ void process_shaders() { string_to_spv("multi_add_f32", "multi_add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, 
{"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}, {"RTE16", "1"}, {"ADD_RMS" , "0"}}); string_to_spv("multi_add_rms_f32", "multi_add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}, {"RTE16", "1"}, {"ADD_RMS" , "1"}}); + string_to_spv("ssm_scan_f32", "ssm_scan.comp", {{"A_TYPE", "float"}}); + string_to_spv("ssm_scan_subgroup_f32", "ssm_scan.comp", {{"A_TYPE", "float"}, {"USE_SUBGROUP_ADD", "1"}}); + + string_to_spv("ssm_conv_f32", "ssm_conv.comp", {{"A_TYPE", "float"}}); + + string_to_spv("topk_moe_f32", "topk_moe.comp", {}); + for (auto &c : compiles) { c.wait(); } @@ -959,7 +966,7 @@ void write_output_files() { } std::string suffixes[2] = {"_f32", "_f16"}; - for (auto op : {"add", "sub", "mul", "div", "add_rms"}) { + for (std::string op : {"add", "sub", "mul", "div", "add_rms"}) { hdr << "extern const void * " << op << "_data[2][2][2][2];\n"; hdr << "extern const uint64_t " << op << "_len[2][2][2][2];\n"; diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 86f1c31afd7a6..9be35c1be8456 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -6964,6 +6964,78 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) { GGML_LOG_INFO("========================================\n"); } +static int ggml_node_list_find_tensor(const struct ggml_cgraph * cgraph, + const int * idxs, + int count, + const struct ggml_tensor * tensor) { + GGML_ASSERT(cgraph && idxs); + for (int i = 0; i < count; ++i) { + const int node_idx = idxs[i]; + + if (node_idx >= cgraph->n_nodes) { + return -1; + } + if (cgraph->nodes[node_idx] == tensor) { + return i; + } + } + return -1; +} + +bool ggml_can_fuse_subgraph_ext(const struct ggml_cgraph * cgraph, + const int * node_idxs, + int count, + const enum ggml_op * ops, + const int * outputs, + int num_outputs) { + GGML_ASSERT(outputs && num_outputs > 0); + + for (int i = 0; i < count; ++i) { + if (node_idxs[i] >= cgraph->n_nodes) { + return false; + } + + const struct ggml_tensor * node = cgraph->nodes[node_idxs[i]]; + + if (node->op != ops[i]) { + return false; + } + + if (ggml_node_list_find_tensor(cgraph, outputs, num_outputs, node) != -1) { + continue; + } + + if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { + return false; + } + + int subgraph_uses = 0; + for (int j = i + 1; j < count; ++j) { + const struct ggml_tensor * other_node = cgraph->nodes[node_idxs[j]]; + for (int src_idx = 0; src_idx < GGML_MAX_SRC; src_idx++) { + if (other_node->src[src_idx] == node) { + subgraph_uses++; + } + } + } + + if (subgraph_uses != ggml_node_get_use_count(cgraph, node_idxs[i])) { + return false; + } + + // if node is a view, check if the view_src and all it's parent view_srcs are within the subgraph + struct ggml_tensor * view_src = node->view_src; + while (view_src) { + if (ggml_node_list_find_tensor(cgraph, node_idxs, count, view_src) == -1) { + return false; + } + view_src = view_src->view_src; + } + } + + return true; +} + // check if node is part of the graph static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { if (cgraph == NULL) { diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index f5e5fba8008bd..1b71fb3749aaa 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -102,6 +102,8 @@ class LLM: EXPERT_COUNT = "{arch}.expert_count" EXPERT_USED_COUNT = "{arch}.expert_used_count" EXPERT_SHARED_COUNT = "{arch}.expert_shared_count" + EXPERT_GROUP_COUNT = "{arch}.expert_group_count" + EXPERT_GROUP_USED_COUNT = "{arch}.expert_group_used_count" 
EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale" EXPERT_WEIGHTS_NORM = "{arch}.expert_weights_norm" EXPERT_GATING_FUNC = "{arch}.expert_gating_func" @@ -400,6 +402,7 @@ class MODEL_ARCH(IntEnum): WAVTOKENIZER_DEC = auto() PLM = auto() BAILINGMOE = auto() + BAILINGMOE2 = auto() DOTS1 = auto() ARCEE = auto() ERNIE4_5 = auto() @@ -744,6 +747,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", MODEL_ARCH.PLM: "plm", MODEL_ARCH.BAILINGMOE: "bailingmoe", + MODEL_ARCH.BAILINGMOE2: "bailingmoe2", MODEL_ARCH.DOTS1: "dots1", MODEL_ARCH.ARCEE: "arcee", MODEL_ARCH.ERNIE4_5: "ernie4_5", @@ -2533,6 +2537,35 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN_SHEXP, MODEL_TENSOR.FFN_UP_SHEXP, ], + MODEL_ARCH.BAILINGMOE2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_EXP_PROBS_B, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP_SHEXP, + MODEL_TENSOR.NEXTN_EH_PROJ, + MODEL_TENSOR.NEXTN_EMBED_TOKENS, + MODEL_TENSOR.NEXTN_ENORM, + MODEL_TENSOR.NEXTN_HNORM, + MODEL_TENSOR.NEXTN_SHARED_HEAD_HEAD, + MODEL_TENSOR.NEXTN_SHARED_HEAD_NORM, + MODEL_TENSOR.LAYER_OUT_NORM, + ], MODEL_ARCH.DOTS1: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 306679e21834b..d52d4f40f7884 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -755,6 +755,12 @@ def add_expert_used_count(self, count: int) -> None: def add_expert_shared_count(self, count: int) -> None: self.add_uint32(Keys.LLM.EXPERT_SHARED_COUNT.format(arch=self.arch), count) + def add_expert_group_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_GROUP_COUNT.format(arch=self.arch), count) + + def add_expert_group_used_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.EXPERT_GROUP_USED_COUNT.format(arch=self.arch), count) + def add_expert_weights_scale(self, value: float) -> None: self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index c05aa6cc488de..d7dcd8efb8426 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -174,6 +174,7 @@ class TensorNameMap: "h.{bid}.self_attention.query_key_value", # bloom "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon "model.layers.{bid}.self_attn.query_key_value", # persimmon + "model.layers.{bid}.attention.query_key_value", # bailingmoe2 "h.{bid}.attn.c_attn", # gpt2 "transformer.h.{bid}.mixer.Wqkv", # phi2 "encoder.layers.{bid}.attn.Wqkv", # nomic-bert @@ -260,6 +261,7 @@ class TensorNameMap: "transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon "model.layers.{bid}.self_attn.dense", # persimmon + "model.layers.{bid}.attention.dense", # bailingmoe2 "h.{bid}.attn.c_proj", # gpt2 "transformer.h.{bid}.mixer.out_proj", # phi2 "model.layers.layers.{bid}.self_attn.o_proj", # plamo @@ -373,6 +375,7 @@ class TensorNameMap: MODEL_TENSOR.FFN_EXP_PROBS_B: ( "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3 dots1 
"model.layers.{bid}.mlp.moe_statics.e_score_correction", # ernie4.5-moe + "model.layers.{bid}.mlp.gate.expert_bias", # bailingmoe2 "model.layers.{bid}.feed_forward.expert_bias", # lfm2moe ), @@ -549,6 +552,7 @@ class TensorNameMap: "language_model.encoder.layers.{bid}.self_attention.q_layernorm", "model.layers.{bid}.self_attn.q_layernorm", # persimmon "model.layers.{bid}.self_attn.query_layernorm", # hunyuan + "model.layers.{bid}.attention.query_layernorm", # bailingmoe2 "model.layers.{bid}.self_attn.q_norm", # cohere olmoe chameleon olmo2 "layers.{bid}.self_attn.q_norm", # embeddinggemma "transformer.blocks.{bid}.attn.q_ln", # sea-lion @@ -563,6 +567,7 @@ class TensorNameMap: "language_model.encoder.layers.{bid}.self_attention.k_layernorm", "model.layers.{bid}.self_attn.k_layernorm", # persimmon "model.layers.{bid}.self_attn.key_layernorm", # hunyuan + "model.layers.{bid}.attention.key_layernorm", # bailingmoe2 "model.layers.{bid}.self_attn.k_norm", # cohere olmoe chameleon olmo2 "layers.{bid}.self_attn.k_norm", # embeddinggemma "transformer.blocks.{bid}.attn.k_ln", # sea-lion @@ -584,6 +589,7 @@ class TensorNameMap: "transformer.decoder_layer.{bid}.rms_norm_3", # Grok "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2 "encoder.layer.{bid}.layer_norm_2", # jina-v2-code + "model.layers.{bid}.final_layernorm", # bailingmoe2 ), MODEL_TENSOR.PER_LAYER_TOKEN_EMBD: ( diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 7111557bfdd8c..5c6817109ba57 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -14,12 +14,12 @@ SentencePieceProcessor = None try: - from mistral_common.tokens.tokenizers.mistral import MistralTokenizer - from mistral_common.tokens.tokenizers.tekken import Tekkenizer - from mistral_common.tokens.tokenizers.utils import ( + from mistral_common.tokens.tokenizers.mistral import MistralTokenizer # pyright: ignore[reportMissingImports] + from mistral_common.tokens.tokenizers.tekken import Tekkenizer # pyright: ignore[reportMissingImports] + from mistral_common.tokens.tokenizers.utils import ( # pyright: ignore[reportMissingImports] _filter_valid_tokenizer_files, ) - from mistral_common.tokens.tokenizers.sentencepiece import ( + from mistral_common.tokens.tokenizers.sentencepiece import ( # pyright: ignore[reportMissingImports] SentencePieceTokenizer, ) except ImportError: diff --git a/requirements/requirements-convert_hf_to_gguf.txt b/requirements/requirements-convert_hf_to_gguf.txt index 90c98c3ffe526..122b4788d9199 100644 --- a/requirements/requirements-convert_hf_to_gguf.txt +++ b/requirements/requirements-convert_hf_to_gguf.txt @@ -1,5 +1,3 @@ -mistral-common>=1.8.3 - -r ./requirements-convert_legacy_llama.txt --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/scripts/snapdragon/adb/llama-cli.farf b/scripts/snapdragon/adb/llama-cli.farf new file mode 100644 index 0000000000000..de84fe89adb44 --- /dev/null +++ b/scripts/snapdragon/adb/llama-cli.farf @@ -0,0 +1 @@ +0xffff diff --git a/scripts/snapdragon/adb/run-bench.sh b/scripts/snapdragon/adb/run-bench.sh new file mode 100755 index 0000000000000..25e0662016cba --- /dev/null +++ b/scripts/snapdragon/adb/run-bench.sh @@ -0,0 +1,39 @@ +#!/bin/sh +# + +# Basedir on device +basedir=/data/local/tmp/llama.cpp + +branch=. 
+[ "$B" != "" ] && branch=$B + +adbserial= +[ "$S" != "" ] && adbserial="-s $S" + +model="Llama-3.2-3B-Instruct-Q4_0.gguf" +[ "$M" != "" ] && model="$M" + +device="HTP0" +[ "$D" != "" ] && device="$D" + +verbose="" +[ "$V" != "" ] && verbose="$V" + +opmask= +[ "$OPMASK" != "" ] && opmask="GGML_HEXAGON_OPMASK=$OPMASK" + +nhvx= +[ "$NHVX" != "" ] && nhvx="GGML_HEXAGON_NHVX=$NHVX" + +ndev= +[ "$NDEV" != "" ] && ndev="GGML_HEXAGON_NDEV=$NDEV" + +set -x + +adb $adbserial shell " \ + cd $basedir; \ + LD_LIBRARY_PATH=$basedir/$branch/lib \ + ADSP_LIBRARY_PATH=$basedir/$branch/lib \ + $ndev $nhvx $opmask ./$branch/bin/llama-bench --device $device --mmap 0 -m $basedir/../gguf/$model \ + -t 4 --batch-size 128 -ngl 99 $@ \ +" diff --git a/scripts/snapdragon/adb/run-cli.sh b/scripts/snapdragon/adb/run-cli.sh new file mode 100755 index 0000000000000..763482e55ab33 --- /dev/null +++ b/scripts/snapdragon/adb/run-cli.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# + +# Basedir on device +basedir=/data/local/tmp/llama.cpp + +cli_opts= + +branch=. +[ "$B" != "" ] && branch=$B + +adbserial= +[ "$S" != "" ] && adbserial="-s $S" + +model="Llama-3.2-3B-Instruct-Q4_0.gguf" +[ "$M" != "" ] && model="$M" + +device="HTP0" +[ "$D" != "" ] && device="$D" + +verbose= +[ "$V" != "" ] && verbose="GGML_HEXAGON_VERBOSE=$V" + +experimental= +[ "$E" != "" ] && experimental="GGML_HEXAGON_EXPERIMENTAL=$E" + +sched= +[ "$SCHED" != "" ] && sched="GGML_SCHED_DEBUG=2" cli_opts="$cli_opts -v" + +profile= +[ "$PROF" != "" ] && profile="GGML_HEXAGON_PROFILE=$PROF GGML_HEXAGON_OPSYNC=1" + +opmask= +[ "$OPMASK" != "" ] && opmask="GGML_HEXAGON_OPMASK=$OPMASK" + +nhvx= +[ "$NHVX" != "" ] && nhvx="GGML_HEXAGON_NHVX=$NHVX" + +ndev= +[ "$NDEV" != "" ] && ndev="GGML_HEXAGON_NDEV=$NDEV" + +set -x + +adb $adbserial shell " \ + cd $basedir; ulimit -c unlimited; \ + LD_LIBRARY_PATH=$basedir/$branch/lib \ + ADSP_LIBRARY_PATH=$basedir/$branch/lib \ + $verbose $experimental $sched $opmask $profile $nhvx $ndev \ + ./$branch/bin/llama-cli --no-mmap -m $basedir/../gguf/$model \ + -t 4 --ctx-size 8192 --batch-size 128 -ctk q8_0 -ctv q8_0 -fa on \ + -ngl 99 --device $device $cli_opts $@ \ +" diff --git a/scripts/snapdragon/adb/run-tool.sh b/scripts/snapdragon/adb/run-tool.sh new file mode 100755 index 0000000000000..bfc213e4c5f1c --- /dev/null +++ b/scripts/snapdragon/adb/run-tool.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# + +# Basedir on device +basedir=/data/local/tmp/llama.cpp + +cli_opts= + +branch=. 
+[ "$B" != "" ] && branch=$B + +adbserial= +[ "$S" != "" ] && adbserial="-s $S" + +device="HTP0" +[ "$D" != "" ] && device="$D" + +verbose= +[ "$V" != "" ] && verbose="GGML_HEXAGON_VERBOSE=$V" + +experimental= +[ "$E" != "" ] && experimental="GGML_HEXAGON_EXPERIMENTAL=$V" + +sched= +[ "$SCHED" != "" ] && sched="GGML_SCHED_DEBUG=2" cli_opts="$cli_opts -v" + +profile= +[ "$PROF" != "" ] && profile="GGML_HEXAGON_PROFILE=$PROF GGML_HEXAGON_OPSYNC=1" + +opmask= +[ "$OPMASK" != "" ] && opmask="GGML_HEXAGON_OPMASK=$OPMASK" + +nhvx= +[ "$NHVX" != "" ] && nhvx="GGML_HEXAGON_NHVX=$NHVX" + +ndev= +[ "$NDEV" != "" ] && ndev="GGML_HEXAGON_NDEV=$NDEV" + +hb= +[ "$HB" != "" ] && hb="GGML_HEXAGON_HOSTBUF=$HB" + +set -x + +tool=$1; shift + +adb $adbserial shell " \ + cd $basedir; ulimit -c unlimited; \ + LD_LIBRARY_PATH=$basedir/$branch/lib \ + ADSP_LIBRARY_PATH=$basedir/$branch/lib \ + $verbose $experimental $sched $opmask $profile $nhvx $ndev $hb ./$branch/bin/$tool $@ \ +" diff --git a/scripts/snapdragon/qdc/readme.md b/scripts/snapdragon/qdc/readme.md new file mode 100644 index 0000000000000..b92cf243aaad0 --- /dev/null +++ b/scripts/snapdragon/qdc/readme.md @@ -0,0 +1 @@ +This directory includes pytest based scripts for running CI jobs on Qualcomm Device Cloud (QDC). diff --git a/scripts/snapdragon/qdc/requirements.txt b/scripts/snapdragon/qdc/requirements.txt new file mode 100644 index 0000000000000..f04bd682ea03c --- /dev/null +++ b/scripts/snapdragon/qdc/requirements.txt @@ -0,0 +1,25 @@ +Appium-Python-Client==5.2.4 +attrs==25.4.0 +certifi==2025.10.5 +exceptiongroup==1.3.0 +h11==0.16.0 +idna==3.11 +iniconfig==2.1.0 +outcome==1.3.0.post0 +packaging==25.0 +pluggy==1.6.0 +Pygments==2.19.2 +PySocks==1.7.1 +pytest==8.4.2 +pytest-dependency==0.6.0 +selenium==4.36.0 +setuptools==80.9.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tomli==2.3.0 +trio==0.31.0 +trio-websocket==0.12.2 +typing_extensions==4.15.0 +urllib3==2.5.0 +websocket-client==1.9.0 +wsproto==1.2.0 diff --git a/scripts/snapdragon/qdc/tests/test_bench.py b/scripts/snapdragon/qdc/tests/test_bench.py new file mode 100644 index 0000000000000..651ab5b717200 --- /dev/null +++ b/scripts/snapdragon/qdc/tests/test_bench.py @@ -0,0 +1,63 @@ +import pytest +import subprocess +import sys + +tmp_path='/data/local/tmp' +pkg_path=f'{tmp_path}/llama.cpp' +lib_path=f'{pkg_path}/lib' +bin_path=f'{pkg_path}/bin' + +model='../gguf/Llama-3.2-1B-Instruct-Q4_0.gguf' +cli_pref=f'cd {pkg_path} && LD_LIBRARY_PATH={lib_path} ADSP_LIBRARY_PATH={lib_path} {bin_path}' + + +def run_cmd(cmd): + p = subprocess.run(cmd, text = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) + sys.stdout.write(p.stdout) + assert(p.returncode == 0) + + +@pytest.mark.dependency() +def test_install(): + run_cmd(['adb', 'push', 'llama.cpp', f'{tmp_path}']) + run_cmd(['adb', 'shell', f'chmod 755 {bin_path}/*']) + + +## Basic cli tests +def run_llama_cli(dev, opts): + prompt='what is the most popular cookie in the world?\nPlease provide a very brief bullet point summary.\nBegin your answer with **BEGIN**.' 
+ opts = '--batch-size 128 -n 128 -no-cnv --seed 42 ' + opts + run_cmd(['adb', 'shell', f'{cli_pref}/llama-cli -m {model} --device {dev} -ngl 99 -t 4 {opts} -p "{prompt}"']) + + +@pytest.mark.dependency(depends=['test_install']) +def test_llama_cli_cpu(): + run_llama_cli('none', '-ctk q8_0 -ctv q8_0 -fa on') + + +@pytest.mark.dependency(depends=['test_install']) +def test_llama_cli_gpu(): + run_llama_cli('GPUOpenCL', '-fa on') + + +@pytest.mark.dependency(depends=['test_install']) +def test_llama_cli_npu(): + run_llama_cli('HTP0', '-ctk q8_0 -ctv q8_0 -fa on') + + +## Basic bench tests +def run_llama_bench(dev): + run_cmd(['adb', 'shell', f'{cli_pref}/llama-bench -m {model} --device {dev} -ngl 99 --batch-size 128 -t 4 -p 128 -n 32']) + + +@pytest.mark.dependency(depends=['test_install']) +def test_llama_bench_cpu(): + run_llama_bench('none') + + +def test_llama_bench_gpu(): + run_llama_bench('GPUOpenCL') + + +def test_llama_bench_npu(): + run_llama_bench('HTP0') diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index b7e00b275b6f7..8ca769c5fd2ef 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -85,6 +85,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, + { LLM_ARCH_BAILINGMOE2, "bailingmoe2" }, { LLM_ARCH_DOTS1, "dots1" }, { LLM_ARCH_ARCEE, "arcee" }, { LLM_ARCH_ERNIE4_5, "ernie4_5" }, @@ -135,6 +136,8 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_EXPERT_COUNT, "%s.expert_count" }, { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" }, { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" }, + { LLM_KV_EXPERT_GROUP_COUNT, "%s.expert_group_count" }, + { LLM_KV_EXPERT_GROUP_USED_COUNT, "%s.expert_group_used_count" }, { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" }, { LLM_KV_EXPERT_WEIGHTS_NORM, "%s.expert_weights_norm" }, { LLM_KV_EXPERT_GATING_FUNC, "%s.expert_gating_func" }, @@ -1946,6 +1949,38 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, }, }, + { + LLM_ARCH_BAILINGMOE2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_NEXTN_EH_PROJ, "blk.%d.nextn.eh_proj" }, + { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" }, + { LLM_TENSOR_NEXTN_ENORM, "blk.%d.nextn.enorm" }, + { LLM_TENSOR_NEXTN_HNORM, "blk.%d.nextn.hnorm" }, + { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" }, + { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + }, + }, { LLM_ARCH_DOTS1, { 
diff --git a/src/llama-arch.h b/src/llama-arch.h index c41de89859d5c..dea725c1a753a 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -89,6 +89,7 @@ enum llm_arch { LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, + LLM_ARCH_BAILINGMOE2, LLM_ARCH_DOTS1, LLM_ARCH_ARCEE, LLM_ARCH_ERNIE4_5, @@ -139,6 +140,8 @@ enum llm_kv { LLM_KV_EXPERT_COUNT, LLM_KV_EXPERT_USED_COUNT, LLM_KV_EXPERT_SHARED_COUNT, + LLM_KV_EXPERT_GROUP_COUNT, + LLM_KV_EXPERT_GROUP_USED_COUNT, LLM_KV_EXPERT_WEIGHTS_SCALE, LLM_KV_EXPERT_WEIGHTS_NORM, LLM_KV_EXPERT_GATING_FUNC, diff --git a/src/llama-batch.h b/src/llama-batch.h index d563adc66aaf5..0dc8cebd2a7b3 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -123,7 +123,7 @@ class llama_batch_allocr { uint32_t n_seq_max; uint32_t n_outputs; - std::array seq_id_0 = { 0 }; // default sequence id + std::array seq_id_0 = {{ 0 }}; // default sequence id std::vector pos; std::vector n_seq_id; diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 956c4e085e5b6..0285006d73caa 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -63,6 +63,8 @@ static const std::map LLM_CHAT_TEMPLATES = { { "megrez", LLM_CHAT_TEMPLATE_MEGREZ }, { "yandex", LLM_CHAT_TEMPLATE_YANDEX }, { "bailing", LLM_CHAT_TEMPLATE_BAILING }, + { "bailing-think", LLM_CHAT_TEMPLATE_BAILING_THINK }, + { "bailing2", LLM_CHAT_TEMPLATE_BAILING2 }, { "llama4", LLM_CHAT_TEMPLATE_LLAMA4 }, { "smolvlm", LLM_CHAT_TEMPLATE_SMOLVLM }, { "hunyuan-moe", LLM_CHAT_TEMPLATE_HUNYUAN_MOE }, @@ -191,6 +193,10 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return LLM_CHAT_TEMPLATE_YANDEX; } else if (tmpl_contains("ASSISTANT") && tmpl_contains("'HUMAN'")) { return LLM_CHAT_TEMPLATE_BAILING; + } else if (tmpl_contains("ASSISTANT") && tmpl_contains("\"HUMAN\"") && tmpl_contains("")) { + return LLM_CHAT_TEMPLATE_BAILING_THINK; + } else if (tmpl_contains("ASSISTANT") && tmpl_contains("HUMAN") && tmpl_contains("<|role_end|>")) { + return LLM_CHAT_TEMPLATE_BAILING2; } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) { return LLM_CHAT_TEMPLATE_LLAMA4; } else if (tmpl_contains("<|endofuserprompt|>")) { @@ -644,8 +650,8 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << " Ассистент:[SEP]"; } - } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING) { - // Bailing (Ling) template + } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING || tmpl == LLM_CHAT_TEMPLATE_BAILING_THINK) { + // Bailing (Ling/Ring) template for (auto message : chat) { std::string role(message->role); @@ -658,6 +664,33 @@ int32_t llm_chat_apply_template( ss << "" << role << "" << message->content; } + if (add_ass) { + ss << "ASSISTANT"; + + if (tmpl == LLM_CHAT_TEMPLATE_BAILING_THINK) { + ss << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING2) { + // Bailing2 (Ling 2.0) template + bool has_system = !chat.empty() && std::string(chat[0]->role) == "system"; + + if (!has_system) { + ss << "SYSTEMdetailed thinking off<|role_end|>"; + } + + for (auto message : chat) { + std::string role(message->role); + + if (role == "user") { + role = "HUMAN"; + } else { + std::transform(role.begin(), role.end(), role.begin(), ::toupper); + } + + ss << "" << role << "" << message->content << "<|role_end|>"; + } + if (add_ass) { ss << "ASSISTANT"; } diff --git a/src/llama-chat.h b/src/llama-chat.h index 5a87d9ab627bc..da1b7c47997ca 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -42,6 +42,8 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_MEGREZ, LLM_CHAT_TEMPLATE_YANDEX, LLM_CHAT_TEMPLATE_BAILING, + 
LLM_CHAT_TEMPLATE_BAILING_THINK, + LLM_CHAT_TEMPLATE_BAILING2, LLM_CHAT_TEMPLATE_LLAMA4, LLM_CHAT_TEMPLATE_SMOLVLM, LLM_CHAT_TEMPLATE_DOTS1, diff --git a/src/llama-context.cpp b/src/llama-context.cpp index e7526e7d0a557..bd348bcad370a 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -2346,7 +2346,8 @@ llama_context * llama_init_from_model( return nullptr; } - if (params.pooling_type != model->hparams.pooling_type) { + if (params.pooling_type != LLAMA_POOLING_TYPE_UNSPECIFIED && + params.pooling_type != model->hparams.pooling_type) { //user-specified pooling-type is different from the model default LLAMA_LOG_WARN("%s: model default pooling_type is [%d], but [%d] was specified\n", __func__, model->hparams.pooling_type, params.pooling_type); diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index f29a1e98c9103..41fa6894377ea 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -950,6 +950,31 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cb(selection_probs, "ffn_moe_probs_biased", il); } + // select top n_group_used expert groups + // https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/e815299b0bcbac849fa540c768ef21845365c9eb/modeling_deepseek.py#L440-L457 + if (hparams.n_expert_groups > 1 && n_tokens > 0) { + const int64_t n_exp_per_group = n_expert / hparams.n_expert_groups; + + // organize experts into n_expert_groups + ggml_tensor * selection_groups = ggml_reshape_3d(ctx0, selection_probs, n_exp_per_group, hparams.n_expert_groups, n_tokens); // [n_exp_per_group, n_expert_groups, n_tokens] + + ggml_tensor * group_scores = ggml_top_k(ctx0, selection_groups, 2); // [2, n_expert_groups, n_tokens] + group_scores = ggml_get_rows(ctx0, ggml_reshape_4d(ctx0, selection_groups, 1, selection_groups->ne[0], selection_groups->ne[1], selection_groups->ne[2]), group_scores); // [1, 2, n_expert_groups, n_tokens] + + // get top n_group_used expert groups + group_scores = ggml_sum_rows(ctx0, ggml_reshape_3d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2], group_scores->ne[3])); // [1, n_expert_groups, n_tokens] + group_scores = ggml_reshape_2d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2]); // [n_expert_groups, n_tokens] + + ggml_tensor * expert_groups = ggml_top_k(ctx0, group_scores, hparams.n_group_used); // [n_group_used, n_tokens] + cb(expert_groups, "ffn_moe_group_topk", il); + + // mask out the other groups + selection_probs = ggml_get_rows(ctx0, selection_groups, expert_groups); // [n_exp_per_group, n_group_used, n_tokens] + selection_probs = ggml_set_rows(ctx0, ggml_scale_bias(ctx0, selection_groups, 0.0f, -INFINITY), selection_probs, expert_groups); // [n_exp_per_group, n_expert_groups, n_tokens] + selection_probs = ggml_reshape_2d(ctx0, selection_probs, n_expert, n_tokens); // [n_expert, n_tokens] + cb(selection_probs, "ffn_moe_probs_masked", il); + } + // select experts ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens] cb(selected_experts->src[0], "ffn_moe_argsort", il); @@ -981,6 +1006,11 @@ ggml_tensor * llm_graph_context::build_moe_ffn( ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens] cb(weights_sum, "ffn_moe_weights_sum", il); + if (arch == LLM_ARCH_BAILINGMOE2) { + weights_sum = ggml_scale_bias(ctx0, weights_sum, 1.0, 1e-20); + cb(weights_sum, "ffn_moe_weights_sum_biased", il); + } + weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens] cb(weights, "ffn_moe_weights_norm", il); diff --git a/src/llama-hparams.h 
b/src/llama-hparams.h index 4e7f73ec234c3..6fcf91b7daa47 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -72,6 +72,8 @@ struct llama_hparams { uint32_t n_ff_chexp = 0; uint32_t n_expert_shared = 0; uint32_t n_norm_groups = 0; + uint32_t n_expert_groups = 0; + uint32_t n_group_used = 0; uint32_t n_group_experts = 0; float expert_group_scale = 0.05f; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 5002bd42ff04e..2a83d66279b79 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -114,9 +114,12 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_17B_16E: return "17Bx16E (Scout)"; case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)"; case LLM_TYPE_A13B: return "A13B"; + case LLM_TYPE_7B_A1B: return "7B.A1B"; case LLM_TYPE_8B_A1B: return "8B.A1B"; + case LLM_TYPE_16B_A1B: return "16B.A1B"; case LLM_TYPE_21B_A3B: return "21B.A3B"; case LLM_TYPE_30B_A3B: return "30B.A3B"; + case LLM_TYPE_100B_A6B: return "100B.A6B"; case LLM_TYPE_106B_A12B: return "106B.A12B"; case LLM_TYPE_235B_A22B: return "235B.A22B"; case LLM_TYPE_300B_A47B: return "300B.A47B"; @@ -401,6 +404,19 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s // add the device default buffer type buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev)); + // add the device extra buffer type (if any) + ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); + auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) + ggml_backend_reg_get_proc_address(reg, "ggml_backend_dev_get_extra_bufts"); + + if (ggml_backend_dev_get_extra_bufts_fn) { + ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(dev); + while (extra_bufts && *extra_bufts) { + buft_list.emplace_back(dev, *extra_bufts); + ++extra_bufts; + } + } + return buft_list; } @@ -421,11 +437,8 @@ struct llama_model::impl { llama_mlocks mlock_bufs; llama_mlocks mlock_mmaps; - // contexts where the model tensors metadata is stored - std::vector ctxs; - - // the model memory buffers for the tensor data - std::vector bufs; + // contexts where the model tensors metadata is stored as well ass the corresponding buffers: + std::vector> ctxs_bufs; buft_list_t cpu_buft_list; std::map gpu_buft_list; @@ -483,11 +496,13 @@ void llama_model::load_hparams(llama_model_loader & ml) { return; } - ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train); - ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd); - ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer); - ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); - ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); + ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train); + ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd); + ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer); + ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); + ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); + ml.get_key(LLM_KV_EXPERT_GROUP_COUNT, hparams.n_expert_groups, false); + ml.get_key(LLM_KV_EXPERT_GROUP_USED_COUNT, hparams.n_group_used, false); if (arch == LLM_ARCH_WAVTOKENIZER_DEC) { ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features); @@ -503,8 +518,15 @@ void llama_model::load_hparams(llama_model_loader & ml) { GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert); if (hparams.n_expert > 0) { GGML_ASSERT(hparams.n_expert_used > 0); + GGML_ASSERT(hparams.n_expert_groups < hparams.n_expert); + if (hparams.n_expert_groups > 1) { + GGML_ASSERT(hparams.n_expert % hparams.n_expert_groups == 0); + 
GGML_ASSERT(hparams.n_group_used > 0); + GGML_ASSERT(hparams.n_group_used < hparams.n_expert_groups); + } } else { GGML_ASSERT(hparams.n_expert_used == 0); + GGML_ASSERT(hparams.n_expert_groups == 0); } std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0); @@ -1846,8 +1868,10 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - switch (hparams.n_layer) { - // TODO: Add llm type label (not sure this is useful) + switch (hparams.n_embd) { + case 1536: type = LLM_TYPE_7B_A1B; break; + case 2048: case 2560: type = LLM_TYPE_3B; break; + case 4096: type = LLM_TYPE_32B; break; default: type = LLM_TYPE_UNKNOWN; } @@ -1888,6 +1912,29 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_BAILINGMOE2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func); + ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS, hparams.nextn_predict_layers, false); + + // TODO: when MTP is implemented, this should probably be updated if needed + hparams.n_layer_kv_from_start = hparams.n_layer - hparams.nextn_predict_layers; + + switch (hparams.n_layer) { + case 20: type = LLM_TYPE_16B_A1B; break; + case 21: type = LLM_TYPE_16B_A1B; break; + case 32: type = LLM_TYPE_100B_A6B; break; + case 33: type = LLM_TYPE_100B_A6B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_DOTS1: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -2182,7 +2229,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) { max_n_tensors += n_layer*2; // duplicated rope freq tensors const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors; - std::map ctx_map; + // define a comparator for the buft -> ctx map to ensure that the order is well-defined: + struct ggml_backend_buft_comparator { + bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const { + return ggml_backend_buft_name(lhs) < ggml_backend_buft_name(rhs); + } + }; + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { auto it = ctx_map.find(buft); if (it == ctx_map.end()) { @@ -2197,12 +2251,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) { throw std::runtime_error(format("failed to create ggml context")); } - ctx_map[buft] = ctx; - pimpl->ctxs.emplace_back(ctx); + ctx_map.emplace(buft, ctx); return ctx; } - return it->second; + return it->second.get(); }; const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED; @@ -5492,6 +5545,70 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); } } break; + case LLM_ARCH_BAILINGMOE2: + { + const int64_t n_ff_exp = hparams.n_ff_exp; + const int64_t n_expert_shared = hparams.n_expert_shared; + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // 
output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for bailingmoe2"); + GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for bailingmoe2"); + + for (int i = 0; i < n_layer; ++i) { + int flags = 0; + if (hparams.nextn_predict_layers > 0 && static_cast(i) >= n_layer - hparams.nextn_predict_layers) { + // skip all tensors in the NextN layers + flags |= TENSOR_SKIP; + } + + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, flags); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, flags); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, flags); + + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, flags); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, flags); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, flags); + + if (static_cast(i) >= hparams.n_layer_dense_lead) { // MoE layers + const int64_t n_ff_shexp = (hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff_exp) * n_expert_shared; + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, flags); + layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED | flags); + + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, flags); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, flags); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, flags); + + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, flags); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, flags); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_shexp}, flags); + } else { // Dense layers + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, flags); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, flags); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, flags); + } + + // NextN/MTP tensors (preserved but unused) - conditionally load for last nextn_predict_layers + if (hparams.nextn_predict_layers > 0 && static_cast(i) >= n_layer - hparams.nextn_predict_layers) { + layer.nextn.eh_proj = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags); + layer.nextn.embed_tokens = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags); + layer.nextn.enorm = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags); + layer.nextn.hnorm = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags); + layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags); + layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, TENSOR_NOT_REQUIRED | flags); + 
layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, flags); + } + } + } break; case LLM_ARCH_DOTS1: { const int64_t n_ff_exp = hparams.n_ff_exp; @@ -6037,16 +6154,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) { pimpl->mappings.reserve(ml.mappings.size()); // create the backend buffers - std::vector> ctx_bufs; - ctx_bufs.reserve(ctx_map.size()); + std::vector> ctx_buf_maps; + ctx_buf_maps.reserve(ctx_map.size()); // Ensure we have enough capacity for the maximum backend buffer we will potentially create const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size(); - pimpl->bufs.reserve(n_max_backend_buffer); + pimpl->ctxs_bufs.reserve(n_max_backend_buffer); - for (auto & it : ctx_map) { - ggml_backend_buffer_type_t buft = it.first; - ggml_context * ctx = it.second; + for (auto & [buft, ctx_ptr] : ctx_map) { + ggml_context * ctx = ctx_ptr.get(); // skip contexts without tensors if (ggml_get_first_tensor(ctx) == nullptr) { @@ -6070,6 +6186,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr; bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev); + ggml_backend_buffer_t buf = nullptr; if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) { for (uint32_t idx = 0; idx < ml.files.size(); idx++) { // only the mmap region containing the tensors in the model is mapped to the backend buffer @@ -6082,20 +6199,18 @@ bool llama_model::load_tensors(llama_model_loader & ml) { continue; } const size_t max_size = ggml_get_max_tensor_size(ctx); - ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size); + buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size); if (buf == nullptr) { throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft))); } - pimpl->bufs.emplace_back(buf); buf_map.emplace(idx, buf); } } else { - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); if (buf == nullptr) { throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft))); } - pimpl->bufs.emplace_back(buf); if (use_mlock && ggml_backend_buffer_is_host(buf)) { pimpl->mlock_bufs.emplace_back(new llama_mlock); auto & mlock_buf = pimpl->mlock_bufs.back(); @@ -6106,10 +6221,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { buf_map.emplace(idx, buf); } } - - if (pimpl->bufs.empty()) { - throw std::runtime_error("failed to allocate buffer"); - } + pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), buf); for (auto & buf : buf_map) { // indicate that this buffer contains weights @@ -6117,7 +6229,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); } - ctx_bufs.emplace_back(ctx, buf_map); + ctx_buf_maps.emplace_back(ctx, buf_map); } if (llama_supports_gpu_offload()) { @@ -6135,22 +6247,20 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } // print memory requirements per buffer type - for (auto & buf : pimpl->bufs) { + for (auto & [_, buf] : pimpl->ctxs_bufs) { LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0); } // populate tensors_by_name - for (auto & ctx : pimpl->ctxs) { + 
for (auto & [ctx, _] : pimpl->ctxs_bufs) { for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) { tensors_by_name.emplace_back(ggml_get_name(cur), cur); } } // load tensor data - for (auto & it : ctx_bufs) { - ggml_context * ctx = it.first; - auto & bufs = it.second; - if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) { + for (auto & [ctx, buf_map] : ctx_buf_maps) { + if (!ml.load_all_data(ctx, buf_map, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) { return false; } } @@ -6190,8 +6300,8 @@ size_t llama_model::n_devices() const { std::map llama_model::memory_breakdown() const { std::map ret; - for (const ggml_backend_buffer_ptr & buf_ptr : pimpl->bufs) { - ret[ggml_backend_buffer_get_type(buf_ptr.get())] += ggml_backend_buffer_get_size(buf_ptr.get()); + for (const auto & [_, buf] : pimpl->ctxs_bufs) { + ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get()); } return ret; } @@ -6354,6 +6464,19 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm); } + if (arch == LLM_ARCH_BAILINGMOE2) { + LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead); + LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); + LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp); + LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared); + LLAMA_LOG_INFO("%s: n_expert_groups = %d\n", __func__, hparams.n_expert_groups); + LLAMA_LOG_INFO("%s: n_group_used = %d\n", __func__, hparams.n_group_used); + LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale); + LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm); + LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func)); + LLAMA_LOG_INFO("%s: nextn_predict_layers = %d\n", __func__, hparams.nextn_predict_layers); + } + if (arch == LLM_ARCH_SMALLTHINKER || arch == LLM_ARCH_LFM2MOE) { LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func)); @@ -17043,6 +17166,150 @@ struct llm_build_bailingmoe : public llm_graph_context { } }; +struct llm_build_bailingmoe2 : public llm_graph_context { + llm_build_bailingmoe2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + const int n_transformer_layers = n_layer - hparams.nextn_predict_layers; + for (int il = 0; il < n_transformer_layers; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + cur = build_lora_mm(model.layers[il].wqkv, cur); + 
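The tensor-loading refactor above stops keeping parallel containers and instead stores each ggml context together with the backend buffer its tensors were allocated in, walking the pairs with C++17 structured bindings (pimpl->ctxs_bufs). A minimal standalone sketch of that bookkeeping pattern; Context and Buffer here are simplified stand-ins, not the real ggml/llama types.

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct Context { std::string name;       };  // stand-in for ggml_context
    struct Buffer  { size_t      size_bytes; };  // stand-in for ggml_backend_buffer

    int main() {
        // each context is stored together with the buffer backing its tensors
        std::vector<std::pair<std::unique_ptr<Context>, Buffer>> ctxs_bufs;
        ctxs_bufs.emplace_back(std::make_unique<Context>(Context{"cpu"}),  Buffer{ 64 * 1024});
        ctxs_bufs.emplace_back(std::make_unique<Context>(Context{"gpu0"}), Buffer{256 * 1024});

        // memory breakdown only needs the buffer half of each pair
        size_t total = 0;
        for (const auto & [_, buf] : ctxs_bufs) {
            total += buf.size_bytes;
        }
        std::printf("total buffer size: %zu bytes\n", total);

        // tensor bookkeeping only needs the context half
        for (const auto & [ctx, _] : ctxs_bufs) {
            std::printf("context: %s\n", ctx->name.c_str());
        }
        return 0;
    }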
cb(cur, "wqkv", il); + + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); + ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_transformer_layers - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * sa_out = ggml_add(ctx0, cur, inpSA); + cb(sa_out, "sa_out", il); + + // MoE branch + cur = build_norm(sa_out, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if (static_cast(il) < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(moe_out, "ffn_moe_out", il); + + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, sa_out); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_dots1 : public llm_graph_context { llm_build_dots1(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; @@ -17698,6 +17965,8 @@ struct llm_build_plamo2 : public llm_graph_context_mamba { cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); cb(cur, "result_norm", -1); + res->t_embd = cur; + // 
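In the BailingMoE2 attention block above, one fused wqkv matmul produces Q, K and V in a single tensor and the three ggml_view_3d calls split it purely by byte offset: each row holds n_embd query floats, then n_embd_gqa key floats, then n_embd_gqa value floats, so Q starts at offset 0, K at n_embd floats and V at n_embd + n_embd_gqa floats. A small standalone sketch of that offset arithmetic, with made-up head counts rather than values from any real model:

    #include <cstdio>
    #include <cstddef>

    int main() {
        const size_t n_embd_head = 128;
        const size_t n_head      = 32;
        const size_t n_head_kv   = 8;

        const size_t n_embd     = n_embd_head * n_head;     // query width
        const size_t n_embd_gqa = n_embd_head * n_head_kv;  // key/value width (grouped-query attention)

        // one row of the fused wqkv result holds [Q | K | V] contiguously
        const size_t row_floats = n_embd + 2 * n_embd_gqa;

        const size_t q_off = 0;
        const size_t k_off = sizeof(float) *  n_embd;               // view offset for K
        const size_t v_off = sizeof(float) * (n_embd + n_embd_gqa); // view offset for V

        std::printf("row size : %zu floats (%zu bytes)\n", row_floats, row_floats * sizeof(float));
        std::printf("Q offset : %zu bytes\n", q_off);
        std::printf("K offset : %zu bytes\n", k_off);
        std::printf("V offset : %zu bytes\n", v_off);
        return 0;
    }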
lm_head cur = build_lora_mm(model.output, cur); cb(cur, "result_output", -1); @@ -19839,6 +20108,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const { { llm = std::make_unique(*this, params); } break; + case LLM_ARCH_BAILINGMOE2: + { + llm = std::make_unique(*this, params); + } break; case LLM_ARCH_SEED_OSS: { llm = std::make_unique(*this, params); @@ -20105,6 +20378,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_EXAONE: case LLM_ARCH_EXAONE4: case LLM_ARCH_MINICPM3: + case LLM_ARCH_BAILINGMOE2: case LLM_ARCH_DOTS1: case LLM_ARCH_HUNYUAN_MOE: case LLM_ARCH_OPENAI_MOE: diff --git a/src/llama-model.h b/src/llama-model.h index 7f48662f2807a..248f854101cd7 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -107,9 +107,12 @@ enum llm_type { LLM_TYPE_17B_16E, // llama4 Scout LLM_TYPE_17B_128E, // llama4 Maverick LLM_TYPE_A13B, + LLM_TYPE_7B_A1B, LLM_TYPE_8B_A1B, // lfm2moe + LLM_TYPE_16B_A1B, LLM_TYPE_21B_A3B, // Ernie MoE small LLM_TYPE_30B_A3B, + LLM_TYPE_100B_A6B, LLM_TYPE_106B_A12B, // GLM-4.5-Air LLM_TYPE_235B_A22B, LLM_TYPE_300B_A47B, // Ernie MoE big diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 7fffd171491aa..639fecbd31745 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1968,6 +1968,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { clean_spaces = false; } else if ( tokenizer_pre == "bailingmoe" || + tokenizer_pre == "bailingmoe2" || tokenizer_pre == "llada-moe") { pre_type = LLAMA_VOCAB_PRE_TYPE_BAILINGMOE; clean_spaces = false; diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index d5c5a2a6656ee..9eb2b66879c0b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -3759,6 +3759,130 @@ struct test_clamp : public test_case { } }; +// GGML_OP_FLOOR +struct test_floor : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { + return VARS_TO_STR2(type, ne); + } + + test_floor(ggml_type type = GGML_TYPE_F32, + std::array ne = {10, 2, 2, 2}) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_floor(ctx, a); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -10.0f, 10.0f); + } + } +}; + +// GGML_OP_CEIL +struct test_ceil : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { + return VARS_TO_STR2(type, ne); + } + + test_ceil(ggml_type type = GGML_TYPE_F32, + std::array ne = {10, 2, 2, 2}) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_ceil(ctx, a); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -10.0f, 10.0f); + } + } +}; + +// GGML_OP_ROUND +struct test_round : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { + return VARS_TO_STR2(type, ne); + } + + test_round(ggml_type 
type = GGML_TYPE_F32, + std::array ne = {10, 2, 2, 2}) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_round(ctx, a); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -10.0f, 10.0f); + } + } +}; + +// GGML_OP_TRUNC +struct test_trunc : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { + return VARS_TO_STR2(type, ne); + } + + test_trunc(ggml_type type = GGML_TYPE_F32, + std::array ne = {10, 2, 2, 2}) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_trunc(ctx, a); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -10.0f, 10.0f); + } + } +}; + // GGML_OP_DIAG_MASK_INF struct test_diag_mask_inf : public test_case { const ggml_type type; @@ -4545,14 +4669,21 @@ struct test_topk_moe: public test_case { const std::array ne; const int n_expert_used; const bool with_norm; - test_topk_moe(std::array ne = {10, 5, 1, 1}, int n_expert_used = 1, bool with_norm = false) - : ne(ne), n_expert_used(n_expert_used), with_norm(with_norm) { + const bool delayed_softmax; + + test_topk_moe(std::array ne = { 10, 5, 1, 1 }, + int n_expert_used = 1, + bool with_norm = false, + bool delayed_softmax = false) : + ne(ne), + n_expert_used(n_expert_used), + with_norm(with_norm), + delayed_softmax(delayed_softmax) { GGML_ASSERT(n_expert_used <= ne[0]); + GGML_ASSERT(!(with_norm && delayed_softmax)); } - std::string vars() override { - return VARS_TO_STR3(ne, n_expert_used, with_norm); - } + std::string vars() override { return VARS_TO_STR4(ne, n_expert_used, with_norm, delayed_softmax); } std::string op_desc(ggml_tensor * t) override { GGML_UNUSED(t); @@ -4566,11 +4697,17 @@ struct test_topk_moe: public test_case { const int n_tokens = ne[1]; ggml_tensor * logits = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne.data()); - ggml_tensor * probs = ggml_soft_max(ctx, logits); + ggml_tensor * probs = delayed_softmax ? 
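The four structs above only wire GGML_OP_FLOOR, GGML_OP_CEIL, GGML_OP_ROUND and GGML_OP_TRUNC into test-backend-ops with uniform inputs in [-10, 10]; the per-element behaviour they are checked against is presumably the usual C math semantics. As a quick reminder of how those four differ on negative and halfway values, here is a standalone snippet with no ggml calls:

    #include <cmath>
    #include <cstdio>

    int main() {
        const float xs[] = {-2.7f, -0.5f, 0.5f, 1.5f, 2.3f};
        std::printf("%8s %8s %8s %8s %8s\n", "x", "floor", "ceil", "round", "trunc");
        for (float x : xs) {
            std::printf("%8.2f %8.2f %8.2f %8.2f %8.2f\n",
                        x, std::floor(x), std::ceil(x), std::round(x), std::trunc(x));
        }
        return 0;
    }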
logits : ggml_soft_max(ctx, logits); ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens] ggml_tensor * out = ggml_get_rows(ctx, ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens] + if (delayed_softmax) { + out = ggml_reshape_2d(ctx, out, n_expert_used, n_tokens); + out = ggml_soft_max(ctx, out); // [n_expert_used, n_tokens] + out = ggml_reshape_3d(ctx, out, 1, n_expert_used, n_tokens); + } + if (with_norm) { out = ggml_reshape_2d(ctx, out, n_expert_used, n_tokens); ggml_tensor * weights_sum = ggml_sum_rows(ctx, out); // [1, n_tokens] @@ -6270,6 +6407,7 @@ static std::vector> make_test_cases_eval() { add_test_bin_bcast(type, {1, 1, 640, 1}, {32, 32, 1, 1}); add_test_bin_bcast(type, {5120, 1, 1, 1}, {1, 256, 1, 1}); add_test_bin_bcast(type, {640, 1, 1, 1}, {1, 1, 1, 1}); + add_test_bin_bcast(type, {64, 262144, 1, 1}, {1, 1, 1, 1}); //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {1, 1, 1, 1}); //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {2, 1, 1, 1}); } @@ -6585,6 +6723,10 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_cos (type)); test_cases.emplace_back(new test_clamp (type)); test_cases.emplace_back(new test_leaky_relu(type)); + test_cases.emplace_back(new test_floor (type)); + test_cases.emplace_back(new test_ceil (type)); + test_cases.emplace_back(new test_round (type)); + test_cases.emplace_back(new test_trunc (type)); test_cases.emplace_back(new test_sqr (type, {7, 1, 5, 3})); test_cases.emplace_back(new test_sqrt (type, {7, 1, 5, 3})); test_cases.emplace_back(new test_log (type, {7, 1, 5, 3})); @@ -6592,6 +6734,10 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_cos (type, {7, 1, 5, 3})); test_cases.emplace_back(new test_clamp (type, {7, 1, 5, 3})); test_cases.emplace_back(new test_leaky_relu(type, {7, 1, 5, 3})); + test_cases.emplace_back(new test_floor (type, {7, 1, 5, 3})); + test_cases.emplace_back(new test_ceil (type, {7, 1, 5, 3})); + test_cases.emplace_back(new test_round (type, {7, 1, 5, 3})); + test_cases.emplace_back(new test_trunc (type, {7, 1, 5, 3})); } test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5)); @@ -6843,6 +6989,9 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_topk_moe({128, 1, 1, 1}, 128, with_norm)); } + test_cases.emplace_back(new test_topk_moe({ 8, 22, 1, 1 }, 4, /*with_norm*/ false, /*delayed_softmax*/ true)); + test_cases.emplace_back(new test_topk_moe({ 32, 22, 1, 1 }, 8, /*with_norm*/ false, /*delayed_softmax*/ true)); + #if 0 // these tests are disabled to save execution time, sbut they can be handy for debugging test_cases.emplace_back(new test_llama(2, true)); @@ -6989,6 +7138,8 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, true)); test_cases.emplace_back(new test_conv_transpose_2d({256, 256, 256, 1}, {3, 3, 16, 256}, 1)); + test_cases.emplace_back(new test_conv_transpose_2d({16, 16, 16, 1}, {3, 3, 8, 16}, 1)); + test_cases.emplace_back(new test_conv_transpose_2d({10, 10, 9, 1}, {3, 3, 1, 9}, 2)); test_cases.emplace_back(new test_mean(GGML_TYPE_F32, {256, 256, 3, 1})); diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index 6d64f07376fb8..82fae671ed00b 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -301,6 +301,30 @@ static void test_simple_grammar() { 
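The delayed_softmax variant added to test_topk_moe above changes where the normalization happens: ggml_top_k runs on the raw logits and ggml_soft_max is applied afterwards to only the k selected values, instead of softmaxing over all experts first. Because softmax is monotonic, the selected experts are identical either way; only the weights differ, and with delayed softmax they sum to 1 over the selection by construction, which is why the test forbids combining it with with_norm. A plain C++ sketch of the two weightings, using made-up logits and no ggml calls:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    static std::vector<float> softmax(std::vector<float> v) {
        const float mx = *std::max_element(v.begin(), v.end());
        float sum = 0.0f;
        for (float & x : v) { x = std::exp(x - mx); sum += x; }
        for (float & x : v) { x /= sum; }
        return v;
    }

    int main() {
        const std::vector<float> logits = {2.0f, 0.5f, 1.0f, -1.0f};
        const int n_expert_used = 2;

        // pick the top-k expert indices by logit (softmax is monotonic, so the
        // selection matches picking by softmax probability)
        std::vector<int> idx(logits.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
                          [&](int a, int b) { return logits[a] > logits[b]; });
        idx.resize(n_expert_used);

        // standard path: softmax over all experts, then gather the selected ones
        const std::vector<float> probs_all = softmax(logits);

        // delayed path: softmax over the selected logits only
        std::vector<float> sel_logits;
        for (int i : idx) sel_logits.push_back(logits[i]);
        const std::vector<float> probs_sel = softmax(sel_logits);

        for (int j = 0; j < n_expert_used; ++j) {
            std::printf("expert %d: softmax-then-topk weight = %.4f, delayed-softmax weight = %.4f\n",
                        idx[j], probs_all[idx[j]], probs_sel[j]);
        }
        return 0;
    }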
"0123", } ); + test_schema( + "min 1 max 900719925474091", + // Schema + R"""({ + "type": "integer", + "exclusiveMinimum": 0, + "maximum": 900719925474091 + })""", + // Passing strings + { + "1", + "2", + "10", + "900719925474090", + "900719925474091", + }, + // Failing strings + { + "0", + "01", + "900719925474092", + "9007199254740910", + } + ); test_schema( "min -1 max 1", R"""({ diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp index 853495b00d9d2..e5158fb5062f0 100644 --- a/tests/test-thread-safety.cpp +++ b/tests/test-thread-safety.cpp @@ -3,6 +3,7 @@ // - Creates n_parallel (--parallel) contexts per model // - Runs inference in parallel on each context +#include #include #include #include @@ -38,13 +39,14 @@ int main(int argc, char ** argv) { cparams.n_seq_max = 1; int dev_count = ggml_backend_dev_count(); - int gpu_dev_count = 0; + std::vector> gpus; for (int i = 0; i < dev_count; ++i) { auto * dev = ggml_backend_dev_get(i); if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { - gpu_dev_count++; + gpus.push_back({dev, nullptr}); } } + const int gpu_dev_count = (int)gpus.size(); const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split //const int num_models = std::max(1, gpu_dev_count); const int num_contexts = std::max(1, params.n_parallel); @@ -58,12 +60,12 @@ int main(int argc, char ** argv) { if (m < gpu_dev_count) { mparams.split_mode = LLAMA_SPLIT_MODE_NONE; - mparams.main_gpu = m; + mparams.devices = gpus[m].data(); } else if (m == gpu_dev_count) { mparams.split_mode = LLAMA_SPLIT_MODE_NONE; mparams.main_gpu = -1; // CPU model } else { - mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;; + mparams.split_mode = LLAMA_SPLIT_MODE_LAYER; } llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); diff --git a/tools/imatrix/CMakeLists.txt b/tools/imatrix/CMakeLists.txt index 22f2fe5fdb828..5af6263f9851d 100644 --- a/tools/imatrix/CMakeLists.txt +++ b/tools/imatrix/CMakeLists.txt @@ -6,3 +6,8 @@ target_compile_features(${TARGET} PRIVATE cxx_std_17) if(LLAMA_TOOLS_INSTALL) install(TARGETS ${TARGET} RUNTIME) endif() + +if (CMAKE_SYSTEM_NAME MATCHES "AIX") + # AIX's flock() function comes from libbsd.a + target_link_libraries(${TARGET} PRIVATE -lbsd) +endif() diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h index 7a7523851c117..1669fad99b36b 100644 --- a/tools/mtmd/clip-impl.h +++ b/tools/mtmd/clip-impl.h @@ -30,6 +30,7 @@ #define KEY_LAYER_NORM_EPS "clip.%s.attention.layer_norm_epsilon" // vision-specific +#define KEY_VISION_PROJ_TYPE "clip.vision.projector_type" // for models with mixed modalities #define KEY_IMAGE_SIZE "clip.vision.image_size" #define KEY_PREPROC_IMAGE_SIZE "clip.vision.preproc_image_size" #define KEY_PATCH_SIZE "clip.vision.patch_size" @@ -48,6 +49,7 @@ #define KEY_MINICPMV_QUERY_NUM "clip.minicpmv_query_num" // audio-specific +#define KEY_AUDIO_PROJ_TYPE "clip.audio.projector_type" // for models with mixed modalities #define KEY_A_NUM_MEL_BINS "clip.audio.num_mel_bins" #define KEY_A_PROJ_STACK_FACTOR "clip.audio.projector.stack_factor" diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 98e68af27a690..f2abf88523843 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -2221,15 +2221,27 @@ struct clip_model_loader { // projector type std::string proj_type; { + // default key get_string(KEY_PROJ_TYPE, proj_type, false); - if (!proj_type.empty()) { - model.proj_type = clip_projector_type_from_string(proj_type); + + // for models with mixed 
modalities + if (proj_type.empty()) { + if (modality == CLIP_MODALITY_VISION) { + get_string(KEY_VISION_PROJ_TYPE, proj_type, false); + } else if (modality == CLIP_MODALITY_AUDIO) { + get_string(KEY_AUDIO_PROJ_TYPE, proj_type, false); + } else { + GGML_ABORT("unknown modality"); + } } + + model.proj_type = clip_projector_type_from_string(proj_type); + if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) { throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str())); } - // correct arch for multimodal models + // correct arch for multimodal models (legacy method) if (model.proj_type == PROJECTOR_TYPE_QWEN25O) { model.proj_type = modality == CLIP_MODALITY_VISION ? PROJECTOR_TYPE_QWEN25VL diff --git a/tools/mtmd/mtmd-cli.cpp b/tools/mtmd/mtmd-cli.cpp index 5fde6ca0c32ae..fd1fb6581b163 100644 --- a/tools/mtmd/mtmd-cli.cpp +++ b/tools/mtmd/mtmd-cli.cpp @@ -76,9 +76,11 @@ struct mtmd_cli_context { mtmd::bitmaps bitmaps; - // note: we know that gemma3 template is "linear", meaning each turn is completely separated to another - // so here we don't need to keep track of chat history + // chat template common_chat_templates_ptr tmpls; + std::vector chat_history; + bool use_jinja = false; + // TODO: support for --system-prompt with /clear command // support for legacy templates (models not having EOT token) llama_tokens antiprompt_tokens; @@ -108,6 +110,8 @@ struct mtmd_cli_context { } tmpls = common_chat_templates_init(model, params.chat_template); + use_jinja = params.use_jinja; + chat_history.clear(); LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja, params.default_template_kwargs).c_str()); init_vision_context(params); @@ -193,19 +197,33 @@ static int generate_response(mtmd_cli_context & ctx, int n_predict) { return 1; } } + + std::string generated_text = common_detokenize(ctx.lctx, generated_tokens); + common_chat_msg msg; + msg.role = "assistant"; + msg.content = generated_text; + ctx.chat_history.push_back(std::move(msg)); + return 0; } -static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, bool add_bos = false) { - common_chat_templates_inputs tmpl_inputs; - tmpl_inputs.messages = {msg}; - tmpl_inputs.add_generation_prompt = true; - tmpl_inputs.use_jinja = false; // jinja is buggy here - auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs); - LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str()); +static std::string chat_add_and_format(mtmd_cli_context & ctx, common_chat_msg & new_msg) { + LOG_DBG("chat_add_and_format: new_msg.role='%s', new_msg.content='%s'\n", + new_msg.role.c_str(), new_msg.content.c_str()); + auto formatted = common_chat_format_single(ctx.tmpls.get(), ctx.chat_history, + new_msg, new_msg.role == "user", + ctx.use_jinja); + ctx.chat_history.push_back(new_msg); + return formatted; +} + +static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg) { + bool add_bos = ctx.chat_history.empty(); + auto formatted_chat = chat_add_and_format(ctx, msg); + LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.c_str()); mtmd_input_text text; - text.text = formatted_chat.prompt.c_str(); + text.text = formatted_chat.c_str(); text.add_special = add_bos; text.parse_special = true; @@ -303,7 +321,7 @@ int main(int argc, char ** argv) { return 1; // error is already printed by libmtmd } } - if (eval_message(ctx, msg, true)) { + if (eval_message(ctx, msg)) { return 1; } if (!g_is_interrupted && generate_response(ctx, 
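The clip loader change above keeps the generic KEY_PROJ_TYPE key as the default and only falls back to the new per-modality keys from clip-impl.h (clip.vision.projector_type and clip.audio.projector_type) when the generic key is absent, as happens for GGUFs that pack vision and audio into one file. A small sketch of that lookup order; the std::map stands in for the GGUF metadata, and the key strings and projector names are example values, not a statement about any particular model file.

    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    enum class modality { vision, audio };

    static std::string get_proj_type(const std::map<std::string, std::string> & kv, modality m) {
        auto it = kv.find("clip.projector_type");  // generic key, tried first
        if (it != kv.end()) {
            return it->second;
        }
        const char * key = (m == modality::vision) ? "clip.vision.projector_type"
                                                   : "clip.audio.projector_type";
        it = kv.find(key);                         // mixed-modality fallback
        if (it == kv.end()) {
            throw std::runtime_error("unknown projector type");
        }
        return it->second;
    }

    int main() {
        // hypothetical mixed-modality metadata: no generic key, one key per modality
        const std::map<std::string, std::string> kv = {
            {"clip.vision.projector_type", "example_vision_projector"},
            {"clip.audio.projector_type",  "example_audio_projector"},
        };
        std::printf("vision projector: %s\n", get_proj_type(kv, modality::vision).c_str());
        std::printf("audio  projector: %s\n", get_proj_type(kv, modality::audio).c_str());
        return 0;
    }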
n_predict)) { @@ -322,7 +340,6 @@ int main(int argc, char ** argv) { LOG("\n /quit or /exit exit the program"); LOG("\n"); - bool is_first_msg = true; std::string content; while (!g_is_interrupted) { @@ -342,7 +359,8 @@ int main(int argc, char ** argv) { } if (line == "/clear") { ctx.n_past = 0; - llama_memory_seq_rm(llama_get_memory(ctx.lctx), 0, 1, -1); // keep BOS + ctx.chat_history.clear(); + llama_memory_clear(llama_get_memory(ctx.lctx), true); LOG("Chat history cleared\n\n"); continue; } @@ -367,7 +385,7 @@ int main(int argc, char ** argv) { common_chat_msg msg; msg.role = "user"; msg.content = content; - int ret = eval_message(ctx, msg, is_first_msg); + int ret = eval_message(ctx, msg); if (ret) { return 1; } @@ -376,7 +394,6 @@ int main(int argc, char ** argv) { return 1; } content.clear(); - is_first_msg = false; } } if (g_is_interrupted) LOG("\nInterrupted by user\n"); diff --git a/tools/rpc/rpc-server.cpp b/tools/rpc/rpc-server.cpp index 088515612772d..58b93c7468ea3 100644 --- a/tools/rpc/rpc-server.cpp +++ b/tools/rpc/rpc-server.cpp @@ -137,7 +137,6 @@ struct rpc_server_params { bool use_cache = false; int n_threads = std::max(1U, std::thread::hardware_concurrency()/2); std::vector devices; - std::vector dev_mem; }; static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { @@ -148,7 +147,6 @@ static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { fprintf(stderr, " -d, --device comma-separated list of devices\n"); fprintf(stderr, " -H, --host HOST host to bind to (default: %s)\n", params.host.c_str()); fprintf(stderr, " -p, --port PORT port to bind to (default: %d)\n", params.port); - fprintf(stderr, " -m, --mem memory size for each device (in MB)\n"); fprintf(stderr, " -c, --cache enable local file cache\n"); fprintf(stderr, "\n"); } @@ -197,23 +195,6 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & } } else if (arg == "-c" || arg == "--cache") { params.use_cache = true; - } else if (arg == "-m" || arg == "--mem") { - if (++i >= argc) { - return false; - } - const std::regex regex{ R"([,/]+)" }; - std::string mem_str = argv[i]; - std::sregex_token_iterator iter(mem_str.begin(), mem_str.end(), regex, -1); - std::sregex_token_iterator end; - for ( ; iter != end; ++iter) { - try { - size_t mem = std::stoul(*iter) * 1024 * 1024; - params.dev_mem.push_back(mem); - } catch (const std::exception & ) { - fprintf(stderr, "error: invalid memory size: %s\n", iter->str().c_str()); - return false; - } - } } else if (arg == "-h" || arg == "--help") { print_usage(argc, argv, params); exit(0); @@ -293,18 +274,6 @@ int main(int argc, char * argv[]) { return 1; } std::string endpoint = params.host + ":" + std::to_string(params.port); - std::vector free_mem, total_mem; - for (size_t i = 0; i < devices.size(); i++) { - if (i < params.dev_mem.size()) { - free_mem.push_back(params.dev_mem[i]); - total_mem.push_back(params.dev_mem[i]); - } else { - size_t free, total; - ggml_backend_dev_memory(devices[i], &free, &total); - free_mem.push_back(free); - total_mem.push_back(total); - } - } const char * cache_dir = nullptr; std::string cache_dir_str; if (params.use_cache) { @@ -328,7 +297,6 @@ int main(int argc, char * argv[]) { return 1; } - start_server_fn(endpoint.c_str(), cache_dir, params.n_threads, devices.size(), - devices.data(), free_mem.data(), total_mem.data()); + start_server_fn(endpoint.c_str(), cache_dir, params.n_threads, devices.size(), devices.data()); return 0; } diff --git a/tools/run/CMakeLists.txt 
b/tools/run/CMakeLists.txt index e52294ccc0ef9..6ad7534e290bc 100644 --- a/tools/run/CMakeLists.txt +++ b/tools/run/CMakeLists.txt @@ -13,5 +13,11 @@ endif () if(LLAMA_TOOLS_INSTALL) install(TARGETS ${TARGET} RUNTIME) endif() + +if (CMAKE_SYSTEM_NAME MATCHES "AIX") + # AIX's flock() function comes from libbsd.a + target_link_libraries(${TARGET} PRIVATE -lbsd) +endif() + target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT} ${LLAMA_RUN_EXTRA_LIBS}) target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz index 1c62ebe968605..026b53b28632f 100644 Binary files a/tools/server/public/index.html.gz and b/tools/server/public/index.html.gz differ diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 8737fba124d50..4124bffa40f85 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -2839,7 +2839,7 @@ struct server_context { slot.generated_text.begin() + pos + stop_pos, slot.generated_text.end()); pos = std::min(slot.n_sent_text, slot.generated_text.size()); - } else if (slot.has_next_token) { + } else if (slot.has_next_token && !llama_vocab_is_eog(vocab, result.tok) ) { stop_pos = slot.find_stopping_strings(str_test, token_str.size(), false); send_text = stop_pos == std::string::npos; } @@ -5714,6 +5714,7 @@ int main(int argc, char ** argv) { clean_up(); t.join(); + llama_memory_breakdown_print(ctx_server.ctx); return 0; } diff --git a/tools/server/webui/package-lock.json b/tools/server/webui/package-lock.json index 9cd6ef9138c95..f86b9282c9bb6 100644 --- a/tools/server/webui/package-lock.json +++ b/tools/server/webui/package-lock.json @@ -50,6 +50,7 @@ "eslint-plugin-svelte": "^3.0.0", "fflate": "^0.8.2", "globals": "^16.0.0", + "http-server": "^14.1.1", "mdast": "^3.0.0", "mdsvex": "^0.12.3", "playwright": "^1.53.0", @@ -2979,6 +2980,13 @@ "node": ">=4" } }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, "node_modules/axe-core": { "version": "4.10.3", "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.3.tgz", @@ -3015,6 +3023,19 @@ "dev": true, "license": "MIT" }, + "node_modules/basic-auth": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", + "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.8" + } + }, "node_modules/better-opn": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/better-opn/-/better-opn-3.0.2.tgz", @@ -3125,6 +3146,37 @@ "node": ">=8" } }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": 
"sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -3335,6 +3387,16 @@ "node": ">= 0.6" } }, + "node_modules/corser": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz", + "integrity": "sha512-utCYNzRSQIZNPIcGZdQc92UVJYAhtGAteCFg0yRaFm8f0P+CPtyGyHXJcGXnffjCybUCEx3FQ2G7U3/o9eIkVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3520,6 +3582,21 @@ "dev": true, "license": "MIT" }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/enhanced-resolve": { "version": "5.18.2", "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.2.tgz", @@ -3547,6 +3624,26 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/es-module-lexer": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", @@ -3554,6 +3651,19 @@ "dev": true, "license": "MIT" }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/es-toolkit": { "version": "1.39.7", "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.7.tgz", @@ -3885,6 +3995,13 @@ "node": ">=0.10.0" } }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true, + "license": "MIT" + }, "node_modules/expect-type": { "version": "1.2.2", 
"resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", @@ -4058,6 +4175,27 @@ "dev": true, "license": "ISC" }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, "node_modules/fsevents": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", @@ -4073,6 +4211,55 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -4099,6 +4286,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", @@ -4123,6 +4323,32 @@ "node": ">=8" } }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hast-util-from-dom": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/hast-util-from-dom/-/hast-util-from-dom-5.0.1.tgz", @@ -4363,6 +4589,16 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, "node_modules/highlight.js": { "version": "11.11.1", "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", @@ -4372,6 +4608,19 @@ "node": ">=12.0.0" } }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/html-void-elements": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", @@ -4382,6 +4631,62 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-server": { + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz", + "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-auth": "^2.0.1", + "chalk": "^4.1.2", + "corser": "^2.0.1", + "he": "^1.2.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy": "^1.18.1", + "mime": "^1.6.0", + "minimist": "^1.2.6", + "opener": "^1.5.1", + "portfinder": "^1.0.28", + "secure-compare": "3.0.1", + "union": "~0.5.0", + "url-join": "^4.0.1" + }, + "bin": { + "http-server": "bin/http-server" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -5008,6 +5313,16 @@ "url": 
"https://github.com/sponsors/wooorm" } }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/mdast": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/mdast/-/mdast-3.0.0.tgz", @@ -5976,6 +6291,19 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/min-indent": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", @@ -6009,6 +6337,16 @@ "node": "*" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/minipass": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", @@ -6124,6 +6462,19 @@ "tslib": "^2.0.3" } }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/open": { "version": "8.4.2", "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", @@ -6142,6 +6493,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "dev": true, + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, "node_modules/optionator": { "version": "0.9.4", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", @@ -6330,6 +6691,20 @@ "node": ">=18" } }, + "node_modules/portfinder": { + "version": "1.0.38", + "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.38.tgz", + "integrity": "sha512-rEwq/ZHlJIKw++XtLAO8PPuOQA/zaPJOZJ37BVuN97nLpMJeuDVLVGRwbFoBgLudgdTMP2hdRJP++H+8QOA3vg==", + "dev": true, + "license": "MIT", + "dependencies": { + "async": "^3.2.6", + "debug": "^4.3.6" + }, + "engines": { + "node": ">= 10.12" + } + }, "node_modules/postcss": { "version": "8.5.6", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", @@ -6680,6 +7055,22 @@ "node": ">=6" } }, + "node_modules/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", + "integrity": 
"sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -6959,6 +7350,13 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -7072,6 +7470,20 @@ "node": ">=6" } }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, "node_modules/scheduler": { "version": "0.26.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", @@ -7079,6 +7491,13 @@ "dev": true, "license": "MIT" }, + "node_modules/secure-compare": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz", + "integrity": "sha512-AckIIV90rPDcBcglUwXPF3kg0P0qmPsPXAj6BBEENQE1p5yA1xfmDJzfi1Tappj37Pv2mVbKpL3Z1T+Nn7k1Qw==", + "dev": true, + "license": "MIT" + }, "node_modules/semver": { "version": "7.7.2", "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", @@ -7122,6 +7541,82 @@ "node": ">=8" } }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": 
"sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/siginfo": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", @@ -7904,6 +8399,18 @@ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", "license": "MIT" }, + "node_modules/union": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz", + "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==", + "dev": true, + "dependencies": { + "qs": "^6.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/unist-util-find-after": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", @@ -8073,6 +8580,13 @@ "punycode": "^2.1.0" } }, + "node_modules/url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true, + "license": "MIT" + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -8447,6 +8961,19 @@ "dev": true, "license": "MIT" }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", diff --git a/tools/server/webui/package.json b/tools/server/webui/package.json index e073cd32f07e1..376f69015261b 100644 --- a/tools/server/webui/package.json +++ b/tools/server/webui/package.json @@ -52,6 +52,7 @@ "eslint-plugin-svelte": "^3.0.0", "fflate": "^0.8.2", "globals": "^16.0.0", + "http-server": "^14.1.1", "mdast": "^3.0.0", "mdsvex": "^0.12.3", "playwright": "^1.53.0", diff --git a/tools/server/webui/playwright.config.ts b/tools/server/webui/playwright.config.ts index 90ca19b09f3ed..51688b394106a 100644 --- a/tools/server/webui/playwright.config.ts +++ b/tools/server/webui/playwright.config.ts @@ -2,8 +2,10 @@ import { defineConfig } from '@playwright/test'; export default defineConfig({ webServer: { - 
command: 'npm run build && npx http-server ../public -p 8181', - port: 8181 + command: 'npm run build && http-server ../public -p 8181', + port: 8181, + timeout: 120000, + reuseExistingServer: false }, testDir: 'e2e' }); diff --git a/tools/server/webui/src/app.d.ts b/tools/server/webui/src/app.d.ts index e9bb140939886..eb14d6fe45143 100644 --- a/tools/server/webui/src/app.d.ts +++ b/tools/server/webui/src/app.d.ts @@ -31,7 +31,8 @@ import type { DatabaseMessageExtraAudioFile, DatabaseMessageExtraImageFile, DatabaseMessageExtraTextFile, - DatabaseMessageExtraPdfFile + DatabaseMessageExtraPdfFile, + DatabaseMessageExtraLegacyContext } from '$lib/types/database'; import type { @@ -73,6 +74,7 @@ declare global { DatabaseMessageExtraImageFile, DatabaseMessageExtraTextFile, DatabaseMessageExtraPdfFile, + DatabaseMessageExtraLegacyContext, SettingsConfigValue, SettingsFieldConfig, SettingsConfigType, diff --git a/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte b/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte index 0007c4c0b4597..e378139d1b626 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatAttachments/ChatAttachmentsList.svelte @@ -94,6 +94,17 @@ attachmentIndex: index, textContent: attachment.content }); + } else if (attachment.type === 'context') { + // Legacy format from old webui - treat as text file + items.push({ + id: `attachment-${index}`, + name: attachment.name, + type: 'text', + isImage: false, + attachment, + attachmentIndex: index, + textContent: attachment.content + }); } else if (attachment.type === 'audioFile') { items.push({ id: `attachment-${index}`, diff --git a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte index 6a7c0dd366e40..67a7fff54cb6b 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatForm.svelte @@ -26,6 +26,7 @@ MimeTypeImage, MimeTypeText } from '$lib/enums/files'; + import { isIMEComposing } from '$lib/utils/is-ime-composing'; interface Props { class?: string; @@ -97,7 +98,7 @@ } async function handleKeydown(event: KeyboardEvent) { - if (event.key === 'Enter' && !event.shiftKey) { + if (event.key === 'Enter' && !event.shiftKey && !isIMEComposing(event)) { event.preventDefault(); if ((!message.trim() && uploadedFiles.length === 0) || disabled || isLoading) return; diff --git a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions.svelte b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions.svelte index a6f3c7320826f..ef03f73f8de8f 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions.svelte @@ -3,6 +3,8 @@ import { Button } from '$lib/components/ui/button'; import ChatFormActionFileAttachments from './ChatFormActionFileAttachments.svelte'; import ChatFormActionRecord from './ChatFormActionRecord.svelte'; + import ChatFormModelSelector from './ChatFormModelSelector.svelte'; + import { config } from '$lib/stores/settings.svelte'; import type { FileTypeCategory } from '$lib/enums/files'; interface Props { @@ -26,32 +28,36 @@ onMicClick, onStop }: Props = $props(); + + let currentConfig = $derived(config()); -
- +
+ + + {#if currentConfig.modelSelectorEnabled} + + {/if} -
- {#if isLoading} - - {:else} - + {#if isLoading} + + {:else} + - - {/if} -
+ + {/if}
diff --git a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormModelSelector.svelte b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormModelSelector.svelte new file mode 100644 index 0000000000000..689415f8df84b --- /dev/null +++ b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormModelSelector.svelte @@ -0,0 +1,358 @@ + + + + + + +
+ {#if loading && options.length === 0 && !isMounted} +
+ + Loading models… +
+ {:else if options.length === 0} +

No models available.

+ {:else} + {@const selectedOption = getDisplayOption()} + +
+ + + {#if isOpen} +
+
0 + ? `${menuPosition.maxHeight}px` + : undefined} + > + {#each options as option (option.id)} + + {/each} +
+
+ {/if} +
+ {/if} + + {#if error} +

{error}

+ {/if} +
diff --git a/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte b/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte index fed0cf712695f..7ade6bc61f333 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatMessages/ChatMessage.svelte @@ -1,6 +1,7 @@
{/if} - {#if config().showModelInfo && message.model} + {#if displayedModel()} @@ -150,9 +168,9 @@ diff --git a/tools/server/webui/src/lib/components/app/chat/ChatProcessingInfo.svelte b/tools/server/webui/src/lib/components/app/chat/ChatProcessingInfo.svelte index c10d7dbf1d781..94b27caa369c8 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatProcessingInfo.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatProcessingInfo.svelte @@ -7,18 +7,19 @@ const processingState = useProcessingState(); + let isCurrentConversationLoading = $derived(isLoading()); let processingDetails = $derived(processingState.getProcessingDetails()); + let showSlotsInfo = $derived(isCurrentConversationLoading || config().keepStatsVisible); - let showSlotsInfo = $derived(isLoading() || config().keepStatsVisible); - + // Track loading state reactively by checking if conversation ID is in loading conversations array $effect(() => { const keepStatsVisible = config().keepStatsVisible; - if (keepStatsVisible || isLoading()) { + if (keepStatsVisible || isCurrentConversationLoading) { processingState.startMonitoring(); } - if (!isLoading() && !keepStatsVisible) { + if (!isCurrentConversationLoading && !keepStatsVisible) { setTimeout(() => { if (!config().keepStatsVisible) { processingState.stopMonitoring(); @@ -27,18 +28,20 @@ } }); + // Update processing state from stored timings $effect(() => { - activeConversation(); - + const conversation = activeConversation(); const messages = activeMessages() as DatabaseMessage[]; const keepStatsVisible = config().keepStatsVisible; - if (keepStatsVisible) { + if (keepStatsVisible && conversation) { if (messages.length === 0) { - slotsService.clearState(); + slotsService.clearConversationState(conversation.id); return; } + // Search backwards through messages to find most recent assistant message with timing data + // Using reverse iteration for performance - avoids array copy and stops at first match let foundTimingData = false; for (let i = messages.length - 1; i >= 0; i--) { @@ -47,15 +50,18 @@ foundTimingData = true; slotsService - .updateFromTimingData({ - prompt_n: message.timings.prompt_n || 0, - predicted_n: message.timings.predicted_n || 0, - predicted_per_second: - message.timings.predicted_n && message.timings.predicted_ms - ? (message.timings.predicted_n / message.timings.predicted_ms) * 1000 - : 0, - cache_n: message.timings.cache_n || 0 - }) + .updateFromTimingData( + { + prompt_n: message.timings.prompt_n || 0, + predicted_n: message.timings.predicted_n || 0, + predicted_per_second: + message.timings.predicted_n && message.timings.predicted_ms + ? 
(message.timings.predicted_n / message.timings.predicted_ms) * 1000 + : 0, + cache_n: message.timings.cache_n || 0 + }, + conversation.id + ) .catch((error) => { console.warn('Failed to update processing state from stored timings:', error); }); @@ -64,7 +70,7 @@ } if (!foundTimingData) { - slotsService.clearState(); + slotsService.clearConversationState(conversation.id); } } }); diff --git a/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte b/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte index 374eb05ab0f46..16563537cc292 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatScreen/ChatScreen.svelte @@ -83,6 +83,8 @@ let activeErrorDialog = $derived(errorDialog()); let isServerLoading = $derived(serverLoading()); + let isCurrentConversationLoading = $derived(isLoading()); + async function handleDeleteConfirm() { const conversation = activeConversation(); if (conversation) { @@ -254,7 +256,7 @@ }); $effect(() => { - if (isLoading() && autoScrollEnabled) { + if (isCurrentConversationLoading && autoScrollEnabled) { scrollInterval = setInterval(scrollChatToBottom, AUTO_SCROLL_INTERVAL); } else if (scrollInterval) { clearInterval(scrollInterval); @@ -305,7 +307,7 @@
-
+
-
- -
+ {#if currentSection.title === 'Import/Export'} + + {:else} +
+ +
+ {/if}
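The settings-dialog branch above routes the 'Import/Export' section to the dedicated ImportExportTab component and leaves every other section on the generic settings fields. A tiny sketch of that dispatch, with the section shape assumed from the `currentSection.title` access in the markup:

    // Assumed minimal shape of a settings section; only `title` is read above.
    interface SettingsSection {
        title: string;
    }

    // Mirrors the `{#if currentSection.title === 'Import/Export'}` branch:
    // one section gets its own tab component, the rest use the generic fields.
    function usesImportExportTab(section: SettingsSection): boolean {
        return section.title === 'Import/Export';
    }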
diff --git a/tools/server/webui/src/lib/components/app/chat/ChatSettings/ConversationSelectionDialog.svelte b/tools/server/webui/src/lib/components/app/chat/ChatSettings/ConversationSelectionDialog.svelte new file mode 100644 index 0000000000000..bc92a50a26b64 --- /dev/null +++ b/tools/server/webui/src/lib/components/app/chat/ChatSettings/ConversationSelectionDialog.svelte @@ -0,0 +1,249 @@ + + + + + + + + + + Select Conversations to {mode === 'export' ? 'Export' : 'Import'} + + + + {#if mode === 'export'} + Choose which conversations you want to export. Selected conversations will be downloaded + as a JSON file. + {:else} + Choose which conversations you want to import. Selected conversations will be merged + with your existing conversations. + {/if} + + + +
+
+ + + + + {#if searchQuery} + + {/if} +
+ +
+ + {selectedIds.size} of {conversations.length} selected + {#if searchQuery} + ({filteredConversations.length} shown) + {/if} + +
+ +
+ + + + + + + + + + + + + {#if filteredConversations.length === 0} + + + + {:else} + {#each filteredConversations as conv (conv.id)} + toggleConversation(conv.id, e.shiftKey)} + > + + + + + + + {/each} + {/if} + +
+ + Conversation NameMessages
+ {#if searchQuery} + No conversations found matching "{searchQuery}" + {:else} + No conversations available + {/if} +
+ { + e.preventDefault(); + e.stopPropagation(); + toggleConversation(conv.id, e.shiftKey); + }} + /> + +
+ {conv.name || 'Untitled conversation'} +
+
+ {messageCountMap.get(conv.id) ?? 0} +
+
+
+
+ + + + + + +
+
+
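The dialog above tracks a `selectedIds` set, filters rows by `searchQuery`, and toggles rows via `toggleConversation(id, shiftKey)`; passing `e.shiftKey` through suggests shift-click range selection. A hedged sketch of that selection logic under those assumptions (identifiers not visible in the markup, such as `orderedIds` and `lastToggledId`, are invented for illustration):

    const selectedIds = new Set<string>();
    let lastToggledId: string | null = null;

    // Toggle a single row, or extend the selection over the contiguous range
    // between the previously toggled row and this one when shift is held.
    function toggleConversation(id: string, shiftKey: boolean, orderedIds: string[]): void {
        if (shiftKey && lastToggledId !== null) {
            const a = orderedIds.indexOf(lastToggledId);
            const b = orderedIds.indexOf(id);
            const [from, to] = a <= b ? [a, b] : [b, a];
            for (let i = from; i <= to; i++) {
                const rowId = orderedIds[i];
                if (rowId !== undefined) selectedIds.add(rowId);
            }
        } else if (selectedIds.has(id)) {
            selectedIds.delete(id);
        } else {
            selectedIds.add(id);
        }
        lastToggledId = id;
    }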
diff --git a/tools/server/webui/src/lib/components/app/chat/ChatSettings/ImportExportTab.svelte b/tools/server/webui/src/lib/components/app/chat/ChatSettings/ImportExportTab.svelte new file mode 100644 index 0000000000000..19c982c7b45ea --- /dev/null +++ b/tools/server/webui/src/lib/components/app/chat/ChatSettings/ImportExportTab.svelte @@ -0,0 +1,255 @@ + + +
+
+
+

Export Conversations

+ +

+ Download all your conversations as a JSON file. This includes all messages, attachments, and + conversation history. +

+ + + + {#if showExportSummary && exportedConversations.length > 0} +
+
+ Exported {exportedConversations.length} conversation{exportedConversations.length === 1 + ? '' + : 's'} +
+ +
    + {#each exportedConversations.slice(0, 10) as conv (conv.id)} +
• {conv.name || 'Untitled conversation'}
+ {/each} + + {#if exportedConversations.length > 10} +
+ ... and {exportedConversations.length - 10} more +
+ {/if} +
+
+ {/if} +
+ +
+

Import Conversations

+ +

+ Import one or more conversations from a previously exported JSON file. This will merge with + your existing conversations. +

+ + + + {#if showImportSummary && importedConversations.length > 0} +
+
+ Imported {importedConversations.length} conversation{importedConversations.length === 1 + ? '' + : 's'} +
+ +
    + {#each importedConversations.slice(0, 10) as conv (conv.id)} +
• {conv.name || 'Untitled conversation'}
+ {/each} + + {#if importedConversations.length > 10} +
+ ... and {importedConversations.length - 10} more +
+ {/if} +
+
+ {/if} +
+
+
+ + (showExportDialog = false)} + onConfirm={handleExportConfirm} +/> + + (showImportDialog = false)} + onConfirm={handleImportConfirm} +/> diff --git a/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarActions.svelte b/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarActions.svelte index e91673e98b036..30d1f9d4b7e98 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarActions.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatSidebar/ChatSidebarActions.svelte @@ -1,9 +1,8 @@ {@render children?.()} - + diff --git a/tools/server/webui/src/lib/constants/localstorage-keys.ts b/tools/server/webui/src/lib/constants/localstorage-keys.ts index 9fcc7bab93d1d..8bdc5f33c38a9 100644 --- a/tools/server/webui/src/lib/constants/localstorage-keys.ts +++ b/tools/server/webui/src/lib/constants/localstorage-keys.ts @@ -1 +1,2 @@ export const SERVER_PROPS_LOCALSTORAGE_KEY = 'LlamaCppWebui.serverProps'; +export const SELECTED_MODEL_LOCALSTORAGE_KEY = 'LlamaCppWebui.selectedModel'; diff --git a/tools/server/webui/src/lib/constants/settings-config.ts b/tools/server/webui/src/lib/constants/settings-config.ts index 154ec888ce2dc..512dcc96997e7 100644 --- a/tools/server/webui/src/lib/constants/settings-config.ts +++ b/tools/server/webui/src/lib/constants/settings-config.ts @@ -13,6 +13,7 @@ export const SETTING_CONFIG_DEFAULT: Record = pdfAsImage: false, showModelInfo: false, renderUserContentAsMarkdown: false, + modelSelectorEnabled: false, // make sure these default values are in sync with `common.h` samplers: 'top_k;typ_p;top_p;min_p;temperature', temperature: 0.8, @@ -86,6 +87,8 @@ export const SETTING_CONFIG_INFO: Record = { pdfAsImage: 'Parse PDF as image instead of text (requires vision-capable model).', showModelInfo: 'Display the model name used to generate each message below the message content.', renderUserContentAsMarkdown: 'Render user messages using markdown formatting in the chat.', + modelSelectorEnabled: + 'Enable the model selector in the chat input to choose the inference model. Sends the associated model field in API requests.', pyInterpreterEnabled: 'Enable Python interpreter using Pyodide. Allows running Python code in markdown code blocks.' }; diff --git a/tools/server/webui/src/lib/services/chat.ts b/tools/server/webui/src/lib/services/chat.ts index 37e60b85b5a6a..df03b10251ac2 100644 --- a/tools/server/webui/src/lib/services/chat.ts +++ b/tools/server/webui/src/lib/services/chat.ts @@ -1,4 +1,5 @@ import { config } from '$lib/stores/settings.svelte'; +import { selectedModelName } from '$lib/stores/models.svelte'; import { slotsService } from './slots'; /** * ChatService - Low-level API communication layer for llama.cpp server interactions @@ -29,7 +30,7 @@ import { slotsService } from './slots'; * - Request lifecycle management (abort, cleanup) */ export class ChatService { - private abortController: AbortController | null = null; + private abortControllers: Map = new Map(); /** * Sends a chat completion request to the llama.cpp server. 
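ChatService above now keys its abort controllers by conversation id (falling back to 'default') instead of holding a single controller, so several conversations can stream and be cancelled independently. A minimal sketch of that pattern in isolation — an illustration of the idea, not the actual class:

    class AbortRegistry {
        private controllers = new Map<string, AbortController>();

        // Starting a new request for a conversation cancels any in-flight one
        // for the same key and hands back a fresh signal for fetch().
        start(conversationId?: string): AbortSignal {
            const key = conversationId ?? 'default';
            this.controllers.get(key)?.abort();
            const controller = new AbortController();
            this.controllers.set(key, controller);
            return controller.signal;
        }

        // With an id: cancel only that conversation. Without one: cancel all
        // requests, matching the global stop / navigate-away path.
        abort(conversationId?: string): void {
            if (conversationId) {
                this.controllers.get(conversationId)?.abort();
                this.controllers.delete(conversationId);
                return;
            }
            for (const controller of this.controllers.values()) controller.abort();
            this.controllers.clear();
        }
    }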
@@ -43,13 +44,16 @@ export class ChatService { */ async sendMessage( messages: ApiChatMessageData[] | (DatabaseMessage & { extra?: DatabaseMessageExtra[] })[], - options: SettingsChatServiceOptions = {} + options: SettingsChatServiceOptions = {}, + conversationId?: string ): Promise { const { stream, onChunk, onComplete, onError, + onReasoningChunk, + onModel, // Generation parameters temperature, max_tokens, @@ -79,25 +83,25 @@ export class ChatService { const currentConfig = config(); - // Cancel any ongoing request and create a new abort controller - this.abort(); - this.abortController = new AbortController(); + const requestId = conversationId || 'default'; + + if (this.abortControllers.has(requestId)) { + this.abortControllers.get(requestId)?.abort(); + } + + const abortController = new AbortController(); + this.abortControllers.set(requestId, abortController); - // Convert database messages with attachments to API format if needed const normalizedMessages: ApiChatMessageData[] = messages .map((msg) => { - // Check if this is a DatabaseMessage by checking for DatabaseMessage-specific fields if ('id' in msg && 'convId' in msg && 'timestamp' in msg) { - // This is a DatabaseMessage, convert it const dbMsg = msg as DatabaseMessage & { extra?: DatabaseMessageExtra[] }; return ChatService.convertMessageToChatServiceData(dbMsg); } else { - // This is already an ApiChatMessageData object return msg as ApiChatMessageData; } }) .filter((msg) => { - // Filter out empty system messages if (msg.role === 'system') { const content = typeof msg.content === 'string' ? msg.content : ''; @@ -107,7 +111,6 @@ export class ChatService { return true; }); - // Build base request body with system message injection const processedMessages = this.injectSystemMessage(normalizedMessages); const requestBody: ApiChatCompletionRequest = { @@ -118,6 +121,13 @@ export class ChatService { stream }; + const modelSelectorEnabled = Boolean(currentConfig.modelSelectorEnabled); + const activeModel = modelSelectorEnabled ? selectedModelName() : null; + + if (modelSelectorEnabled && activeModel) { + requestBody.model = activeModel; + } + requestBody.reasoning_format = currentConfig.disableReasoningFormat ? 'none' : 'auto'; if (temperature !== undefined) requestBody.temperature = temperature; @@ -172,11 +182,10 @@ export class ChatService { ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}) }, body: JSON.stringify(requestBody), - signal: this.abortController.signal + signal: abortController.signal }); if (!response.ok) { - // Use the new parseErrorResponse method to handle structured errors const error = await this.parseErrorResponse(response); if (onError) { onError(error); @@ -185,15 +194,19 @@ export class ChatService { } if (stream) { - return this.handleStreamResponse( + await this.handleStreamResponse( response, onChunk, onComplete, onError, - options.onReasoningChunk + onReasoningChunk, + onModel, + conversationId, + abortController.signal ); + return; } else { - return this.handleNonStreamResponse(response, onComplete, onError); + return this.handleNonStreamResponse(response, onComplete, onError, onModel); } } catch (error) { if (error instanceof Error && error.name === 'AbortError') { @@ -227,18 +240,19 @@ export class ChatService { onError(userFriendlyError); } throw userFriendlyError; + } finally { + this.abortControllers.delete(requestId); } } /** - * Handles streaming response from the chat completion API. - * Processes server-sent events and extracts content chunks from the stream. 
- * - * @param response - The fetch Response object containing the streaming data + * Handles streaming response from the chat completion API + * @param response - The Response object from the fetch request * @param onChunk - Optional callback invoked for each content chunk received * @param onComplete - Optional callback invoked when the stream is complete with full response * @param onError - Optional callback invoked if an error occurs during streaming * @param onReasoningChunk - Optional callback invoked for each reasoning content chunk + * @param conversationId - Optional conversation ID for per-conversation state tracking * @returns {Promise} Promise that resolves when streaming is complete * @throws {Error} if the stream cannot be read or parsed */ @@ -251,7 +265,10 @@ export class ChatService { timings?: ChatMessageTimings ) => void, onError?: (error: Error) => void, - onReasoningChunk?: (chunk: string) => void + onReasoningChunk?: (chunk: string) => void, + onModel?: (model: string) => void, + conversationId?: string, + abortSignal?: AbortSignal ): Promise { const reader = response.body?.getReader(); @@ -265,18 +282,25 @@ export class ChatService { let hasReceivedData = false; let lastTimings: ChatMessageTimings | undefined; let streamFinished = false; + let modelEmitted = false; try { let chunk = ''; while (true) { + if (abortSignal?.aborted) break; + const { done, value } = await reader.read(); if (done) break; + if (abortSignal?.aborted) break; + chunk += decoder.decode(value, { stream: true }); const lines = chunk.split('\n'); - chunk = lines.pop() || ''; // Save incomplete line for next read + chunk = lines.pop() || ''; for (const line of lines) { + if (abortSignal?.aborted) break; + if (line.startsWith('data: ')) { const data = line.slice(6); if (data === '[DONE]') { @@ -287,15 +311,19 @@ export class ChatService { try { const parsed: ApiChatCompletionStreamChunk = JSON.parse(data); + const chunkModel = this.extractModelName(parsed); + if (chunkModel && !modelEmitted) { + modelEmitted = true; + onModel?.(chunkModel); + } + const content = parsed.choices[0]?.delta?.content; const reasoningContent = parsed.choices[0]?.delta?.reasoning_content; const timings = parsed.timings; const promptProgress = parsed.prompt_progress; if (timings || promptProgress) { - this.updateProcessingState(timings, promptProgress); - - // Store the latest timing data + this.updateProcessingState(timings, promptProgress, conversationId); if (timings) { lastTimings = timings; } @@ -304,21 +332,29 @@ export class ChatService { if (content) { hasReceivedData = true; aggregatedContent += content; - onChunk?.(content); + if (!abortSignal?.aborted) { + onChunk?.(content); + } } if (reasoningContent) { hasReceivedData = true; fullReasoningContent += reasoningContent; - onReasoningChunk?.(reasoningContent); + if (!abortSignal?.aborted) { + onReasoningChunk?.(reasoningContent); + } } } catch (e) { console.error('Error parsing JSON chunk:', e); } } } + + if (abortSignal?.aborted) break; } + if (abortSignal?.aborted) return; + if (streamFinished) { if (!hasReceivedData && aggregatedContent.length === 0) { const noResponseError = new Error('No response received from server. 
Please try again.'); @@ -355,7 +391,8 @@ export class ChatService { reasoningContent?: string, timings?: ChatMessageTimings ) => void, - onError?: (error: Error) => void + onError?: (error: Error) => void, + onModel?: (model: string) => void ): Promise { try { const responseText = await response.text(); @@ -366,6 +403,12 @@ export class ChatService { } const data: ApiChatCompletionResponse = JSON.parse(responseText); + + const responseModel = this.extractModelName(data); + if (responseModel) { + onModel?.(responseModel); + } + const content = data.choices[0]?.message?.content || ''; const reasoningContent = data.choices[0]?.message?.reasoning_content; @@ -445,6 +488,19 @@ export class ChatService { }); } + // Handle legacy 'context' type from old webui (pasted content) + const legacyContextFiles = message.extra.filter( + (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraLegacyContext => + extra.type === 'context' + ); + + for (const legacyContextFile of legacyContextFiles) { + contentParts.push({ + type: 'text', + text: `\n\n--- File: ${legacyContextFile.name} ---\n${legacyContextFile.content}` + }); + } + const audioFiles = message.extra.filter( (extra: DatabaseMessageExtra): extra is DatabaseMessageExtraAudioFile => extra.type === 'audioFile' @@ -520,10 +576,18 @@ export class ChatService { * * @public */ - public abort(): void { - if (this.abortController) { - this.abortController.abort(); - this.abortController = null; + public abort(conversationId?: string): void { + if (conversationId) { + const abortController = this.abortControllers.get(conversationId); + if (abortController) { + abortController.abort(); + this.abortControllers.delete(conversationId); + } + } else { + for (const controller of this.abortControllers.values()) { + controller.abort(); + } + this.abortControllers.clear(); } } @@ -581,32 +645,66 @@ export class ChatService { return error; } catch { - // If we can't parse the error response, return a generic error const fallback = new Error(`Server error (${response.status}): ${response.statusText}`); fallback.name = 'HttpError'; return fallback; } } + private extractModelName(data: unknown): string | undefined { + const asRecord = (value: unknown): Record | undefined => { + return typeof value === 'object' && value !== null + ? (value as Record) + : undefined; + }; + + const getTrimmedString = (value: unknown): string | undefined => { + return typeof value === 'string' && value.trim() ? value.trim() : undefined; + }; + + const root = asRecord(data); + if (!root) return undefined; + + // 1) root (some implementations provide `model` at the top level) + const rootModel = getTrimmedString(root.model); + if (rootModel) return rootModel; + + // 2) streaming choice (delta) or final response (message) + const firstChoice = Array.isArray(root.choices) ? asRecord(root.choices[0]) : undefined; + if (!firstChoice) return undefined; + + // priority: delta.model (first chunk) else message.model (final response) + const deltaModel = getTrimmedString(asRecord(firstChoice.delta)?.model); + if (deltaModel) return deltaModel; + + const messageModel = getTrimmedString(asRecord(firstChoice.message)?.model); + if (messageModel) return messageModel; + + // avoid guessing from non-standard locations (metadata, etc.) 
+ return undefined; + } + private updateProcessingState( timings?: ChatMessageTimings, - promptProgress?: ChatMessagePromptProgress + promptProgress?: ChatMessagePromptProgress, + conversationId?: string ): void { - // Calculate tokens per second from timing data const tokensPerSecond = timings?.predicted_ms && timings?.predicted_n ? (timings.predicted_n / timings.predicted_ms) * 1000 : 0; - // Update slots service with timing data (async but don't wait) slotsService - .updateFromTimingData({ - prompt_n: timings?.prompt_n || 0, - predicted_n: timings?.predicted_n || 0, - predicted_per_second: tokensPerSecond, - cache_n: timings?.cache_n || 0, - prompt_progress: promptProgress - }) + .updateFromTimingData( + { + prompt_n: timings?.prompt_n || 0, + predicted_n: timings?.predicted_n || 0, + predicted_per_second: tokensPerSecond, + cache_n: timings?.cache_n || 0, + prompt_progress: promptProgress + }, + conversationId + ) .catch((error) => { console.warn('Failed to update processing state:', error); }); diff --git a/tools/server/webui/src/lib/services/models.ts b/tools/server/webui/src/lib/services/models.ts new file mode 100644 index 0000000000000..1c7fa3b45631c --- /dev/null +++ b/tools/server/webui/src/lib/services/models.ts @@ -0,0 +1,22 @@ +import { base } from '$app/paths'; +import { config } from '$lib/stores/settings.svelte'; +import type { ApiModelListResponse } from '$lib/types/api'; + +export class ModelsService { + static async list(): Promise { + const currentConfig = config(); + const apiKey = currentConfig.apiKey?.toString().trim(); + + const response = await fetch(`${base}/v1/models`, { + headers: { + ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}) + } + }); + + if (!response.ok) { + throw new Error(`Failed to fetch model list (status ${response.status})`); + } + + return response.json() as Promise; + } +} diff --git a/tools/server/webui/src/lib/services/slots.ts b/tools/server/webui/src/lib/services/slots.ts index 06c0a77de9138..e99297d6a0506 100644 --- a/tools/server/webui/src/lib/services/slots.ts +++ b/tools/server/webui/src/lib/services/slots.ts @@ -37,6 +37,8 @@ export class SlotsService { private callbacks: Set<(state: ApiProcessingState | null) => void> = new Set(); private isStreamingActive: boolean = false; private lastKnownState: ApiProcessingState | null = null; + private conversationStates: Map = new Map(); + private activeConversationId: string | null = null; /** * Start streaming session tracking @@ -75,6 +77,62 @@ export class SlotsService { return this.isStreamingActive; } + /** + * Set the active conversation for statistics display + */ + setActiveConversation(conversationId: string | null): void { + this.activeConversationId = conversationId; + this.notifyCallbacks(); + } + + /** + * Update processing state for a specific conversation + */ + updateConversationState(conversationId: string, state: ApiProcessingState | null): void { + this.conversationStates.set(conversationId, state); + + if (conversationId === this.activeConversationId) { + this.lastKnownState = state; + this.notifyCallbacks(); + } + } + + /** + * Get processing state for a specific conversation + */ + getConversationState(conversationId: string): ApiProcessingState | null { + return this.conversationStates.get(conversationId) || null; + } + + /** + * Clear state for a specific conversation + */ + clearConversationState(conversationId: string): void { + this.conversationStates.delete(conversationId); + + if (conversationId === this.activeConversationId) { + this.lastKnownState = null; 
+ this.notifyCallbacks(); + } + } + + /** + * Notify all callbacks with current state + */ + private notifyCallbacks(): void { + const currentState = this.activeConversationId + ? this.conversationStates.get(this.activeConversationId) || null + : this.lastKnownState; + + for (const callback of this.callbacks) { + try { + callback(currentState); + } catch (error) { + console.error('Error in slots service callback:', error); + } + } + } + /** * @deprecated Polling is no longer used - timing data comes from ChatService streaming response * This method logs a warning if called to help identify outdated usage @@ -100,29 +158,29 @@ export class SlotsService { /** * Updates processing state with timing data from ChatService streaming response */ - async updateFromTimingData(timingData: { - prompt_n: number; - predicted_n: number; - predicted_per_second: number; - cache_n: number; - prompt_progress?: ChatMessagePromptProgress; - }): Promise { + async updateFromTimingData( + timingData: { + prompt_n: number; + predicted_n: number; + predicted_per_second: number; + cache_n: number; + prompt_progress?: ChatMessagePromptProgress; + }, + conversationId?: string + ): Promise { const processingState = await this.parseCompletionTimingData(timingData); - // Only update if we successfully parsed the state if (processingState === null) { console.warn('Failed to parse timing data - skipping update'); + return; } - this.lastKnownState = processingState; - - for (const callback of this.callbacks) { - try { - callback(processingState); - } catch (error) { - console.error('Error in timing callback:', error); - } + if (conversationId) { + this.updateConversationState(conversationId, processingState); + } else { + this.lastKnownState = processingState; + this.notifyCallbacks(); } } @@ -143,6 +201,7 @@ export class SlotsService { ...(apiKey ? 
{ Authorization: `Bearer ${apiKey}` } : {}) } }); + if (response.ok) { const slotsData = await response.json(); if (Array.isArray(slotsData) && slotsData.length > 0) { @@ -179,6 +238,7 @@ export class SlotsService { if (contextTotal === null) { console.warn('No context total available - cannot calculate processing state'); + return null; } @@ -214,13 +274,21 @@ export class SlotsService { /** * Get current processing state * Returns the last known state from timing data, or null if no data available + * If activeConversationId is set, returns state for that conversation */ async getCurrentState(): Promise { + if (this.activeConversationId) { + const conversationState = this.conversationStates.get(this.activeConversationId); + + if (conversationState) { + return conversationState; + } + } + if (this.lastKnownState) { return this.lastKnownState; } try { - // Import dynamically to avoid circular dependency const { chatStore } = await import('$lib/stores/chat.svelte'); const messages = chatStore.activeMessages; diff --git a/tools/server/webui/src/lib/stores/chat.svelte.ts b/tools/server/webui/src/lib/stores/chat.svelte.ts index 5b77abb4cb21c..a2e74a2e10721 100644 --- a/tools/server/webui/src/lib/stores/chat.svelte.ts +++ b/tools/server/webui/src/lib/stores/chat.svelte.ts @@ -1,11 +1,12 @@ import { DatabaseStore } from '$lib/stores/database'; import { chatService, slotsService } from '$lib/services'; -import { serverStore } from '$lib/stores/server.svelte'; import { config } from '$lib/stores/settings.svelte'; +import { normalizeModelName } from '$lib/utils/model-names'; import { filterByLeafNodeId, findLeafNode, findDescendantMessages } from '$lib/utils/branching'; import { browser } from '$app/environment'; import { goto } from '$app/navigation'; import { toast } from 'svelte-sonner'; +import { SvelteMap } from 'svelte/reactivity'; import type { ExportedConversations } from '$lib/types/database'; /** @@ -50,6 +51,8 @@ class ChatStore { errorDialogState = $state<{ type: 'timeout' | 'server'; message: string } | null>(null); isInitialized = $state(false); isLoading = $state(false); + conversationLoadingStates = new SvelteMap(); + conversationStreamingStates = new SvelteMap(); titleUpdateConfirmationCallback?: (currentTitle: string, newTitle: string) => Promise; constructor() { @@ -94,6 +97,13 @@ class ChatStore { this.activeConversation = conversation; this.activeMessages = []; + slotsService.setActiveConversation(conversation.id); + + const isConvLoading = this.isConversationLoading(conversation.id); + this.isLoading = isConvLoading; + + this.currentResponse = ''; + await goto(`#/chat/${conversation.id}`); return conversation.id; @@ -114,6 +124,14 @@ class ChatStore { this.activeConversation = conversation; + slotsService.setActiveConversation(convId); + + const isConvLoading = this.isConversationLoading(convId); + this.isLoading = isConvLoading; + + const streamingState = this.getConversationStreaming(convId); + this.currentResponse = streamingState?.response || ''; + if (conversation.currNode) { const allMessages = await DatabaseStore.getConversationMessages(convId); this.activeMessages = filterByLeafNodeId( @@ -285,6 +303,47 @@ class ChatStore { return apiOptions; } + /** + * Helper methods for per-conversation loading state management + */ + private setConversationLoading(convId: string, loading: boolean): void { + if (loading) { + this.conversationLoadingStates.set(convId, true); + if (this.activeConversation?.id === convId) { + this.isLoading = true; + } + } else { + 
this.conversationLoadingStates.delete(convId); + if (this.activeConversation?.id === convId) { + this.isLoading = false; + } + } + } + + private isConversationLoading(convId: string): boolean { + return this.conversationLoadingStates.get(convId) || false; + } + + private setConversationStreaming(convId: string, response: string, messageId: string): void { + this.conversationStreamingStates.set(convId, { response, messageId }); + if (this.activeConversation?.id === convId) { + this.currentResponse = response; + } + } + + private clearConversationStreaming(convId: string): void { + this.conversationStreamingStates.delete(convId); + if (this.activeConversation?.id === convId) { + this.currentResponse = ''; + } + } + + private getConversationStreaming( + convId: string + ): { response: string; messageId: string } | undefined { + return this.conversationStreamingStates.get(convId); + } + /** * Handles streaming chat completion with the AI model * @param allMessages - All messages in the conversation @@ -300,150 +359,162 @@ class ChatStore { ): Promise { let streamedContent = ''; let streamedReasoningContent = ''; - let modelCaptured = false; - const captureModelIfNeeded = (updateDbImmediately = true): string | undefined => { - if (!modelCaptured) { - const currentModelName = serverStore.modelName; + let resolvedModel: string | null = null; + let modelPersisted = false; - if (currentModelName) { - if (updateDbImmediately) { - DatabaseStore.updateMessage(assistantMessage.id, { model: currentModelName }).catch( - console.error - ); - } + const recordModel = (modelName: string, persistImmediately = true): void => { + const normalizedModel = normalizeModelName(modelName); - const messageIndex = this.findMessageIndex(assistantMessage.id); + if (!normalizedModel || normalizedModel === resolvedModel) { + return; + } - this.updateMessageAtIndex(messageIndex, { model: currentModelName }); - modelCaptured = true; + resolvedModel = normalizedModel; - return currentModelName; - } + const messageIndex = this.findMessageIndex(assistantMessage.id); + + this.updateMessageAtIndex(messageIndex, { model: normalizedModel }); + + if (persistImmediately && !modelPersisted) { + modelPersisted = true; + DatabaseStore.updateMessage(assistantMessage.id, { model: normalizedModel }).catch( + (error) => { + console.error('Failed to persist model name:', error); + modelPersisted = false; + resolvedModel = null; + } + ); } - return undefined; }; slotsService.startStreaming(); + slotsService.setActiveConversation(assistantMessage.convId); - await chatService.sendMessage(allMessages, { - ...this.getApiOptions(), - - onChunk: (chunk: string) => { - streamedContent += chunk; - this.currentResponse = streamedContent; - - captureModelIfNeeded(); - const messageIndex = this.findMessageIndex(assistantMessage.id); - this.updateMessageAtIndex(messageIndex, { - content: streamedContent - }); - }, + await chatService.sendMessage( + allMessages, + { + ...this.getApiOptions(), + + onChunk: (chunk: string) => { + streamedContent += chunk; + this.setConversationStreaming( + assistantMessage.convId, + streamedContent, + assistantMessage.id + ); - onReasoningChunk: (reasoningChunk: string) => { - streamedReasoningContent += reasoningChunk; + const messageIndex = this.findMessageIndex(assistantMessage.id); + this.updateMessageAtIndex(messageIndex, { + content: streamedContent + }); + }, - captureModelIfNeeded(); + onReasoningChunk: (reasoningChunk: string) => { + streamedReasoningContent += reasoningChunk; - const messageIndex = 
this.findMessageIndex(assistantMessage.id); + const messageIndex = this.findMessageIndex(assistantMessage.id); - this.updateMessageAtIndex(messageIndex, { thinking: streamedReasoningContent }); - }, + this.updateMessageAtIndex(messageIndex, { thinking: streamedReasoningContent }); + }, - onComplete: async ( - finalContent?: string, - reasoningContent?: string, - timings?: ChatMessageTimings - ) => { - slotsService.stopStreaming(); + onModel: (modelName: string) => { + recordModel(modelName); + }, - const updateData: { - content: string; - thinking: string; - timings?: ChatMessageTimings; - model?: string; - } = { - content: finalContent || streamedContent, - thinking: reasoningContent || streamedReasoningContent, - timings: timings - }; + onComplete: async ( + finalContent?: string, + reasoningContent?: string, + timings?: ChatMessageTimings + ) => { + slotsService.stopStreaming(); + + const updateData: { + content: string; + thinking: string; + timings?: ChatMessageTimings; + model?: string; + } = { + content: finalContent || streamedContent, + thinking: reasoningContent || streamedReasoningContent, + timings: timings + }; - const capturedModel = captureModelIfNeeded(false); + if (resolvedModel && !modelPersisted) { + updateData.model = resolvedModel; + modelPersisted = true; + } - if (capturedModel) { - updateData.model = capturedModel; - } + await DatabaseStore.updateMessage(assistantMessage.id, updateData); - await DatabaseStore.updateMessage(assistantMessage.id, updateData); + const messageIndex = this.findMessageIndex(assistantMessage.id); - const messageIndex = this.findMessageIndex(assistantMessage.id); + const localUpdateData: { timings?: ChatMessageTimings; model?: string } = { + timings: timings + }; - const localUpdateData: { timings?: ChatMessageTimings; model?: string } = { - timings: timings - }; + if (updateData.model) { + localUpdateData.model = updateData.model; + } - if (updateData.model) { - localUpdateData.model = updateData.model; - } + this.updateMessageAtIndex(messageIndex, localUpdateData); - this.updateMessageAtIndex(messageIndex, localUpdateData); + await DatabaseStore.updateCurrentNode(assistantMessage.convId, assistantMessage.id); - await DatabaseStore.updateCurrentNode(this.activeConversation!.id, assistantMessage.id); - this.activeConversation!.currNode = assistantMessage.id; - await this.refreshActiveMessages(); + if (this.activeConversation?.id === assistantMessage.convId) { + this.activeConversation.currNode = assistantMessage.id; + await this.refreshActiveMessages(); + } - if (onComplete) { - await onComplete(streamedContent); - } + if (onComplete) { + await onComplete(streamedContent); + } - this.isLoading = false; - this.currentResponse = ''; - }, + this.setConversationLoading(assistantMessage.convId, false); + this.clearConversationStreaming(assistantMessage.convId); + slotsService.clearConversationState(assistantMessage.convId); + }, - onError: (error: Error) => { - slotsService.stopStreaming(); + onError: (error: Error) => { + slotsService.stopStreaming(); - if (error.name === 'AbortError' || error instanceof DOMException) { - this.isLoading = false; - this.currentResponse = ''; - return; - } + if (this.isAbortError(error)) { + this.setConversationLoading(assistantMessage.convId, false); + this.clearConversationStreaming(assistantMessage.convId); + slotsService.clearConversationState(assistantMessage.convId); + return; + } - console.error('Streaming error:', error); - this.isLoading = false; - this.currentResponse = ''; + console.error('Streaming 
error:', error); + this.setConversationLoading(assistantMessage.convId, false); + this.clearConversationStreaming(assistantMessage.convId); + slotsService.clearConversationState(assistantMessage.convId); - const messageIndex = this.activeMessages.findIndex( - (m: DatabaseMessage) => m.id === assistantMessage.id - ); + const messageIndex = this.activeMessages.findIndex( + (m: DatabaseMessage) => m.id === assistantMessage.id + ); - if (messageIndex !== -1) { - const [failedMessage] = this.activeMessages.splice(messageIndex, 1); + if (messageIndex !== -1) { + const [failedMessage] = this.activeMessages.splice(messageIndex, 1); - if (failedMessage) { - DatabaseStore.deleteMessage(failedMessage.id).catch((cleanupError) => { - console.error('Failed to remove assistant message after error:', cleanupError); - }); + if (failedMessage) { + DatabaseStore.deleteMessage(failedMessage.id).catch((cleanupError) => { + console.error('Failed to remove assistant message after error:', cleanupError); + }); + } } - } - const dialogType = error.name === 'TimeoutError' ? 'timeout' : 'server'; + const dialogType = error.name === 'TimeoutError' ? 'timeout' : 'server'; - this.showErrorDialog(dialogType, error.message); + this.showErrorDialog(dialogType, error.message); - if (onError) { - onError(error); + if (onError) { + onError(error); + } } - } - }); - } - - private showErrorDialog(type: 'timeout' | 'server', message: string): void { - this.errorDialogState = { type, message }; - } - - dismissErrorDialog(): void { - this.errorDialogState = null; + }, + assistantMessage.convId + ); } /** @@ -455,6 +526,14 @@ class ChatStore { return error instanceof Error && (error.name === 'AbortError' || error instanceof DOMException); } + private showErrorDialog(type: 'timeout' | 'server', message: string): void { + this.errorDialogState = { type, message }; + } + + dismissErrorDialog(): void { + this.errorDialogState = null; + } + /** * Finds the index of a message in the active messages array * @param messageId - The message ID to find @@ -491,7 +570,8 @@ class ChatStore { content: '', timestamp: Date.now(), thinking: '', - children: [] + children: [], + model: null }, parentId || null ); @@ -519,7 +599,12 @@ class ChatStore { * @param extras - Optional extra data (files, attachments, etc.) 
*/ async sendMessage(content: string, extras?: DatabaseMessageExtra[]): Promise { - if ((!content.trim() && (!extras || extras.length === 0)) || this.isLoading) return; + if (!content.trim() && (!extras || extras.length === 0)) return; + + if (this.activeConversation && this.isConversationLoading(this.activeConversation.id)) { + console.log('Cannot send message: current conversation is already processing a message'); + return; + } let isNewConversation = false; @@ -534,8 +619,9 @@ class ChatStore { } this.errorDialogState = null; - this.isLoading = true; - this.currentResponse = ''; + + this.setConversationLoading(this.activeConversation.id, true); + this.clearConversationStreaming(this.activeConversation.id); let userMessage: DatabaseMessage | null = null; @@ -546,7 +632,6 @@ class ChatStore { throw new Error('Failed to add user message'); } - // If this is a new conversation, update the title with the first user prompt if (isNewConversation && content) { const title = content.trim(); await this.updateConversationName(this.activeConversation.id, title); @@ -559,19 +644,18 @@ class ChatStore { } this.activeMessages.push(assistantMessage); - // Don't update currNode until after streaming completes to maintain proper conversation path const conversationContext = this.activeMessages.slice(0, -1); await this.streamChatCompletion(conversationContext, assistantMessage); } catch (error) { if (this.isAbortError(error)) { - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); return; } console.error('Failed to send message:', error); - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); if (!this.errorDialogState) { if (error instanceof Error) { const dialogType = error.name === 'TimeoutError' ? 'timeout' : 'server'; @@ -587,12 +671,19 @@ class ChatStore { * Stops the current message generation * Aborts ongoing requests and saves partial response if available */ - stopGeneration(): void { + async stopGeneration(): Promise { + if (!this.activeConversation) return; + + const convId = this.activeConversation.id; + + await this.savePartialResponseIfNeeded(convId); + slotsService.stopStreaming(); - chatService.abort(); - this.savePartialResponseIfNeeded(); - this.isLoading = false; - this.currentResponse = ''; + chatService.abort(convId); + + this.setConversationLoading(convId, false); + this.clearConversationStreaming(convId); + slotsService.clearConversationState(convId); } /** @@ -604,6 +695,9 @@ class ChatStore { slotsService.stopStreaming(); chatService.abort(); await this.savePartialResponseIfNeeded(); + + this.conversationLoadingStates.clear(); + this.conversationStreamingStates.clear(); this.isLoading = false; this.currentResponse = ''; } @@ -612,12 +706,23 @@ class ChatStore { * Saves partial response if generation was interrupted * Preserves user's partial content and timing data when generation is stopped early */ - private async savePartialResponseIfNeeded(): Promise { - if (!this.currentResponse.trim() || !this.activeMessages.length) { + private async savePartialResponseIfNeeded(convId?: string): Promise { + const conversationId = convId || this.activeConversation?.id; + if (!conversationId) return; + + const streamingState = this.conversationStreamingStates.get(conversationId); + if (!streamingState || !streamingState.response.trim()) { return; } - const lastMessage = this.activeMessages[this.activeMessages.length - 1]; + const messages = + conversationId === this.activeConversation?.id + ? 
this.activeMessages + : await DatabaseStore.getConversationMessages(conversationId); + + if (!messages.length) return; + + const lastMessage = messages[messages.length - 1]; if (lastMessage && lastMessage.role === 'assistant') { try { @@ -626,7 +731,7 @@ class ChatStore { thinking?: string; timings?: ChatMessageTimings; } = { - content: this.currentResponse + content: streamingState.response }; if (lastMessage.thinking?.trim()) { @@ -640,7 +745,6 @@ class ChatStore { prompt_n: lastKnownState.promptTokens || 0, predicted_n: lastKnownState.tokensDecoded || 0, cache_n: lastKnownState.cacheTokens || 0, - // We don't have ms data from the state, but we can estimate predicted_ms: lastKnownState.tokensPerSecond && lastKnownState.tokensDecoded ? (lastKnownState.tokensDecoded / lastKnownState.tokensPerSecond) * 1000 @@ -701,7 +805,6 @@ class ChatStore { this.updateMessageAtIndex(messageIndex, { content: newContent }); await DatabaseStore.updateMessage(messageId, { content: newContent }); - // If this is the first user message, update the conversation title with confirmation if needed if (isFirstUserMessage && newContent.trim()) { await this.updateConversationTitleWithConfirmation( this.activeConversation.id, @@ -718,8 +821,8 @@ class ChatStore { this.activeMessages = this.activeMessages.slice(0, messageIndex + 1); this.updateConversationTimestamp(); - this.isLoading = true; - this.currentResponse = ''; + this.setConversationLoading(this.activeConversation.id, true); + this.clearConversationStreaming(this.activeConversation.id); try { const assistantMessage = await this.createAssistantMessage(); @@ -742,7 +845,7 @@ class ChatStore { ); } catch (regenerateError) { console.error('Failed to regenerate response:', regenerateError); - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); const messageIndex = this.findMessageIndex(messageId); this.updateMessageAtIndex(messageIndex, { content: originalContent }); @@ -784,8 +887,8 @@ class ChatStore { this.activeMessages = this.activeMessages.slice(0, messageIndex); this.updateConversationTimestamp(); - this.isLoading = true; - this.currentResponse = ''; + this.setConversationLoading(this.activeConversation.id, true); + this.clearConversationStreaming(this.activeConversation.id); try { const parentMessageId = @@ -806,7 +909,7 @@ class ChatStore { await this.streamChatCompletion(conversationContext, assistantMessage); } catch (regenerateError) { console.error('Failed to regenerate response:', regenerateError); - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); } } catch (error) { if (this.isAbortError(error)) return; @@ -862,7 +965,6 @@ class ChatStore { try { const currentConfig = config(); - // Only ask for confirmation if the setting is enabled and callback is provided if (currentConfig.askForTitleConfirmation && onConfirmationNeeded) { const conversation = await DatabaseStore.getConversation(convId); if (!conversation) return false; @@ -944,8 +1046,9 @@ class ChatStore { /** * Exports all conversations with their messages as a JSON file + * Returns the list of exported conversations */ - async exportAllConversations(): Promise { + async exportAllConversations(): Promise { try { const allConversations = await DatabaseStore.getAllConversations(); if (allConversations.length === 0) { @@ -972,6 +1075,7 @@ class ChatStore { URL.revokeObjectURL(url); toast.success(`All conversations (${allConversations.length}) prepared for download`); + return allConversations; } catch (err) { 
console.error('Failed to export conversations:', err); throw err; @@ -982,8 +1086,9 @@ class ChatStore { * Imports conversations from a JSON file. * Supports both single conversation (object) and multiple conversations (array). * Uses DatabaseStore for safe, encapsulated data access + * Returns the list of imported conversations */ - async importConversations(): Promise { + async importConversations(): Promise { return new Promise((resolve, reject) => { const input = document.createElement('input'); input.type = 'file'; @@ -1024,7 +1129,9 @@ class ChatStore { toast.success(`Imported ${result.imported} conversation(s), skipped ${result.skipped}`); - resolve(undefined); + // Extract the conversation objects from imported data + const importedConversations = importedData.map((item) => item.conv); + resolve(importedConversations); } catch (err: unknown) { const message = err instanceof Error ? err.message : 'Unknown error'; console.error('Failed to import conversations:', err); @@ -1170,14 +1277,16 @@ class ChatStore { } /** - * Clears the active conversation and resets state + * Clears the active conversation and messages * Used when navigating away from chat or starting fresh + * Note: Does not stop ongoing streaming to allow background completion */ clearActiveConversation(): void { this.activeConversation = null; this.activeMessages = []; - this.currentResponse = ''; this.isLoading = false; + this.currentResponse = ''; + slotsService.setActiveConversation(null); } /** Refreshes active messages based on currNode after branch navigation */ @@ -1419,8 +1528,8 @@ class ChatStore { return; } - this.isLoading = true; - this.currentResponse = ''; + this.setConversationLoading(this.activeConversation.id, true); + this.clearConversationStreaming(this.activeConversation.id); const newAssistantMessage = await DatabaseStore.createMessageBranch( { @@ -1430,7 +1539,8 @@ class ChatStore { role: 'assistant', content: '', thinking: '', - children: [] + children: [], + model: null }, parentMessage.id ); @@ -1454,7 +1564,7 @@ class ChatStore { if (this.isAbortError(error)) return; console.error('Failed to regenerate message with branching:', error); - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); } } @@ -1466,8 +1576,8 @@ class ChatStore { if (!this.activeConversation) return; this.errorDialogState = null; - this.isLoading = true; - this.currentResponse = ''; + this.setConversationLoading(this.activeConversation.id, true); + this.clearConversationStreaming(this.activeConversation.id); try { // Get conversation path up to the user message @@ -1487,7 +1597,8 @@ class ChatStore { role: 'assistant', content: '', thinking: '', - children: [] + children: [], + model: null }, userMessageId ); @@ -1499,9 +1610,30 @@ class ChatStore { await this.streamChatCompletion(conversationPath, assistantMessage); } catch (error) { console.error('Failed to generate response:', error); - this.isLoading = false; + this.setConversationLoading(this.activeConversation!.id, false); } } + + /** + * Public methods for accessing per-conversation states + */ + public isConversationLoadingPublic(convId: string): boolean { + return this.isConversationLoading(convId); + } + + public getConversationStreamingPublic( + convId: string + ): { response: string; messageId: string } | undefined { + return this.getConversationStreaming(convId); + } + + public getAllLoadingConversations(): string[] { + return Array.from(this.conversationLoadingStates.keys()); + } + + public getAllStreamingConversations(): 
string[] { + return Array.from(this.conversationStreamingStates.keys()); + } } export const chatStore = new ChatStore(); @@ -1541,3 +1673,11 @@ export function stopGeneration() { chatStore.stopGeneration(); } export const messages = () => chatStore.activeMessages; + +// Per-conversation state access +export const isConversationLoading = (convId: string) => + chatStore.isConversationLoadingPublic(convId); +export const getConversationStreaming = (convId: string) => + chatStore.getConversationStreamingPublic(convId); +export const getAllLoadingConversations = () => chatStore.getAllLoadingConversations(); +export const getAllStreamingConversations = () => chatStore.getAllStreamingConversations(); diff --git a/tools/server/webui/src/lib/stores/models.svelte.ts b/tools/server/webui/src/lib/stores/models.svelte.ts new file mode 100644 index 0000000000000..bcb68826ce839 --- /dev/null +++ b/tools/server/webui/src/lib/stores/models.svelte.ts @@ -0,0 +1,187 @@ +import { ModelsService } from '$lib/services/models'; +import { persisted } from '$lib/stores/persisted.svelte'; +import { SELECTED_MODEL_LOCALSTORAGE_KEY } from '$lib/constants/localstorage-keys'; +import type { ModelOption } from '$lib/types/models'; + +type PersistedModelSelection = { + id: string; + model: string; +}; + +class ModelsStore { + private _models = $state([]); + private _loading = $state(false); + private _updating = $state(false); + private _error = $state(null); + private _selectedModelId = $state(null); + private _selectedModelName = $state(null); + private _persistedSelection = persisted( + SELECTED_MODEL_LOCALSTORAGE_KEY, + null + ); + + constructor() { + const persisted = this._persistedSelection.value; + if (persisted) { + this._selectedModelId = persisted.id; + this._selectedModelName = persisted.model; + } + } + + get models(): ModelOption[] { + return this._models; + } + + get loading(): boolean { + return this._loading; + } + + get updating(): boolean { + return this._updating; + } + + get error(): string | null { + return this._error; + } + + get selectedModelId(): string | null { + return this._selectedModelId; + } + + get selectedModelName(): string | null { + return this._selectedModelName; + } + + get selectedModel(): ModelOption | null { + if (!this._selectedModelId) { + return null; + } + + return this._models.find((model) => model.id === this._selectedModelId) ?? null; + } + + async fetch(force = false): Promise { + if (this._loading) return; + if (this._models.length > 0 && !force) return; + + this._loading = true; + this._error = null; + + try { + const response = await ModelsService.list(); + + const models: ModelOption[] = response.data.map((item, index) => { + const details = response.models?.[index]; + const rawCapabilities = Array.isArray(details?.capabilities) ? details?.capabilities : []; + const displayNameSource = + details?.name && details.name.trim().length > 0 ? details.name : item.id; + const displayName = this.toDisplayName(displayNameSource); + + return { + id: item.id, + name: displayName, + model: details?.model || item.id, + description: details?.description, + capabilities: rawCapabilities.filter((value): value is string => Boolean(value)), + details: details?.details, + meta: item.meta ?? null + } satisfies ModelOption; + }); + + this._models = models; + + const selection = this.determineInitialSelection(models); + + this._selectedModelId = selection.id; + this._selectedModelName = selection.model; + this._persistedSelection.value = + selection.id && selection.model ? 
{ id: selection.id, model: selection.model } : null; + } catch (error) { + this._models = []; + this._error = error instanceof Error ? error.message : 'Failed to load models'; + + throw error; + } finally { + this._loading = false; + } + } + + async select(modelId: string): Promise { + if (!modelId || this._updating) { + return; + } + + if (this._selectedModelId === modelId) { + return; + } + + const option = this._models.find((model) => model.id === modelId); + if (!option) { + throw new Error('Selected model is not available'); + } + + this._updating = true; + this._error = null; + + try { + this._selectedModelId = option.id; + this._selectedModelName = option.model; + this._persistedSelection.value = { id: option.id, model: option.model }; + } finally { + this._updating = false; + } + } + + private toDisplayName(id: string): string { + const segments = id.split(/\\|\//); + const candidate = segments.pop(); + + return candidate && candidate.trim().length > 0 ? candidate : id; + } + + /** + * Determines which model should be selected after fetching the models list. + * Priority: current selection > persisted selection > first available model > none + */ + private determineInitialSelection(models: ModelOption[]): { + id: string | null; + model: string | null; + } { + const persisted = this._persistedSelection.value; + let nextSelectionId = this._selectedModelId ?? persisted?.id ?? null; + let nextSelectionName = this._selectedModelName ?? persisted?.model ?? null; + + if (nextSelectionId) { + const match = models.find((m) => m.id === nextSelectionId); + + if (match) { + nextSelectionId = match.id; + nextSelectionName = match.model; + } else if (models[0]) { + nextSelectionId = models[0].id; + nextSelectionName = models[0].model; + } else { + nextSelectionId = null; + nextSelectionName = null; + } + } else if (models[0]) { + nextSelectionId = models[0].id; + nextSelectionName = models[0].model; + } + + return { id: nextSelectionId, model: nextSelectionName }; + } +} + +export const modelsStore = new ModelsStore(); + +export const modelOptions = () => modelsStore.models; +export const modelsLoading = () => modelsStore.loading; +export const modelsUpdating = () => modelsStore.updating; +export const modelsError = () => modelsStore.error; +export const selectedModelId = () => modelsStore.selectedModelId; +export const selectedModelName = () => modelsStore.selectedModelName; +export const selectedModelOption = () => modelsStore.selectedModel; + +export const fetchModels = modelsStore.fetch.bind(modelsStore); +export const selectModel = modelsStore.select.bind(modelsStore); diff --git a/tools/server/webui/src/lib/stores/persisted.svelte.ts b/tools/server/webui/src/lib/stores/persisted.svelte.ts new file mode 100644 index 0000000000000..1e07f80ed7275 --- /dev/null +++ b/tools/server/webui/src/lib/stores/persisted.svelte.ts @@ -0,0 +1,50 @@ +import { browser } from '$app/environment'; + +type PersistedValue = { + get value(): T; + set value(newValue: T); +}; + +export function persisted(key: string, initialValue: T): PersistedValue { + let value = initialValue; + + if (browser) { + try { + const stored = localStorage.getItem(key); + + if (stored !== null) { + value = JSON.parse(stored) as T; + } + } catch (error) { + console.warn(`Failed to load ${key}:`, error); + } + } + + const persist = (next: T) => { + if (!browser) { + return; + } + + try { + if (next === null || next === undefined) { + localStorage.removeItem(key); + return; + } + + localStorage.setItem(key, JSON.stringify(next)); + } catch 
(error) { + console.warn(`Failed to persist ${key}:`, error); + } + }; + + return { + get value() { + return value; + }, + + set value(newValue: T) { + value = newValue; + persist(newValue); + } + }; +} diff --git a/tools/server/webui/src/lib/stores/settings.svelte.ts b/tools/server/webui/src/lib/stores/settings.svelte.ts index b330cbb4bf42e..b10f0dd3a4189 100644 --- a/tools/server/webui/src/lib/stores/settings.svelte.ts +++ b/tools/server/webui/src/lib/stores/settings.svelte.ts @@ -80,7 +80,8 @@ class SettingsStore { if (!browser) return; try { - const savedVal = JSON.parse(localStorage.getItem('config') || '{}'); + const storedConfigRaw = localStorage.getItem('config'); + const savedVal = JSON.parse(storedConfigRaw || '{}'); // Merge with defaults to prevent breaking changes this.config = { diff --git a/tools/server/webui/src/lib/types/api.d.ts b/tools/server/webui/src/lib/types/api.d.ts index d0e60a6c13706..6d76ab1f68e9d 100644 --- a/tools/server/webui/src/lib/types/api.d.ts +++ b/tools/server/webui/src/lib/types/api.d.ts @@ -36,6 +36,41 @@ export interface ApiChatMessageData { timestamp?: number; } +export interface ApiModelDataEntry { + id: string; + object: string; + created: number; + owned_by: string; + meta?: Record | null; +} + +export interface ApiModelDetails { + name: string; + model: string; + modified_at?: string; + size?: string | number; + digest?: string; + type?: string; + description?: string; + tags?: string[]; + capabilities?: string[]; + parameters?: string; + details?: { + parent_model?: string; + format?: string; + family?: string; + families?: string[]; + parameter_size?: string; + quantization_level?: string; + }; +} + +export interface ApiModelListResponse { + object: string; + data: ApiModelDataEntry[]; + models?: ApiModelDetails[]; +} + export interface ApiLlamaCppServerProps { default_generation_settings: { id: number; @@ -120,6 +155,7 @@ export interface ApiChatCompletionRequest { content: string | ApiChatMessageContentPart[]; }>; stream?: boolean; + model?: string; // Reasoning parameters reasoning_format?: string; // Generation parameters @@ -150,10 +186,14 @@ export interface ApiChatCompletionRequest { } export interface ApiChatCompletionStreamChunk { + model?: string; choices: Array<{ + model?: string; + metadata?: { model?: string }; delta: { content?: string; reasoning_content?: string; + model?: string; }; }>; timings?: { @@ -167,10 +207,14 @@ export interface ApiChatCompletionStreamChunk { } export interface ApiChatCompletionResponse { + model?: string; choices: Array<{ + model?: string; + metadata?: { model?: string }; message: { content: string; reasoning_content?: string; + model?: string; }; }>; } diff --git a/tools/server/webui/src/lib/types/database.d.ts b/tools/server/webui/src/lib/types/database.d.ts index 7f6b76ba271cc..b5318b73f4108 100644 --- a/tools/server/webui/src/lib/types/database.d.ts +++ b/tools/server/webui/src/lib/types/database.d.ts @@ -34,11 +34,22 @@ export interface DatabaseMessageExtraPdfFile { processedAsImages: boolean; // Whether PDF was processed as images } +/** + * Legacy format from old webui - pasted content was stored as "context" type + * @deprecated Use DatabaseMessageExtraTextFile instead + */ +export interface DatabaseMessageExtraLegacyContext { + type: 'context'; + name: string; + content: string; +} + export type DatabaseMessageExtra = | DatabaseMessageExtraImageFile | DatabaseMessageExtraTextFile | DatabaseMessageExtraAudioFile - | DatabaseMessageExtraPdfFile; + | DatabaseMessageExtraPdfFile + | 
diff --git a/tools/server/webui/src/lib/stores/settings.svelte.ts b/tools/server/webui/src/lib/stores/settings.svelte.ts
index b330cbb4bf42e..b10f0dd3a4189 100644
--- a/tools/server/webui/src/lib/stores/settings.svelte.ts
+++ b/tools/server/webui/src/lib/stores/settings.svelte.ts
@@ -80,7 +80,8 @@ class SettingsStore {
         if (!browser) return;
 
         try {
-            const savedVal = JSON.parse(localStorage.getItem('config') || '{}');
+            const storedConfigRaw = localStorage.getItem('config');
+            const savedVal = JSON.parse(storedConfigRaw || '{}');
 
             // Merge with defaults to prevent breaking changes
             this.config = {
diff --git a/tools/server/webui/src/lib/types/api.d.ts b/tools/server/webui/src/lib/types/api.d.ts
index d0e60a6c13706..6d76ab1f68e9d 100644
--- a/tools/server/webui/src/lib/types/api.d.ts
+++ b/tools/server/webui/src/lib/types/api.d.ts
@@ -36,6 +36,41 @@ export interface ApiChatMessageData {
     timestamp?: number;
 }
 
+export interface ApiModelDataEntry {
+    id: string;
+    object: string;
+    created: number;
+    owned_by: string;
+    meta?: Record<string, unknown> | null;
+}
+
+export interface ApiModelDetails {
+    name: string;
+    model: string;
+    modified_at?: string;
+    size?: string | number;
+    digest?: string;
+    type?: string;
+    description?: string;
+    tags?: string[];
+    capabilities?: string[];
+    parameters?: string;
+    details?: {
+        parent_model?: string;
+        format?: string;
+        family?: string;
+        families?: string[];
+        parameter_size?: string;
+        quantization_level?: string;
+    };
+}
+
+export interface ApiModelListResponse {
+    object: string;
+    data: ApiModelDataEntry[];
+    models?: ApiModelDetails[];
+}
+
 export interface ApiLlamaCppServerProps {
     default_generation_settings: {
         id: number;
@@ -120,6 +155,7 @@ export interface ApiChatCompletionRequest {
         content: string | ApiChatMessageContentPart[];
     }>;
     stream?: boolean;
+    model?: string;
     // Reasoning parameters
     reasoning_format?: string;
     // Generation parameters
@@ -150,10 +186,14 @@
 }
 
 export interface ApiChatCompletionStreamChunk {
+    model?: string;
     choices: Array<{
+        model?: string;
+        metadata?: { model?: string };
         delta: {
             content?: string;
             reasoning_content?: string;
+            model?: string;
         };
     }>;
     timings?: {
@@ -167,10 +207,14 @@
 }
 
 export interface ApiChatCompletionResponse {
+    model?: string;
     choices: Array<{
+        model?: string;
+        metadata?: { model?: string };
         message: {
             content: string;
             reasoning_content?: string;
+            model?: string;
         };
     }>;
 }
diff --git a/tools/server/webui/src/lib/types/database.d.ts b/tools/server/webui/src/lib/types/database.d.ts
index 7f6b76ba271cc..b5318b73f4108 100644
--- a/tools/server/webui/src/lib/types/database.d.ts
+++ b/tools/server/webui/src/lib/types/database.d.ts
@@ -34,11 +34,22 @@ export interface DatabaseMessageExtraPdfFile {
     processedAsImages: boolean; // Whether PDF was processed as images
 }
 
+/**
+ * Legacy format from old webui - pasted content was stored as "context" type
+ * @deprecated Use DatabaseMessageExtraTextFile instead
+ */
+export interface DatabaseMessageExtraLegacyContext {
+    type: 'context';
+    name: string;
+    content: string;
+}
+
 export type DatabaseMessageExtra =
     | DatabaseMessageExtraImageFile
     | DatabaseMessageExtraTextFile
     | DatabaseMessageExtraAudioFile
-    | DatabaseMessageExtraPdfFile;
+    | DatabaseMessageExtraPdfFile
+    | DatabaseMessageExtraLegacyContext;
 
 export interface DatabaseMessage {
     id: string;
diff --git a/tools/server/webui/src/lib/types/models.d.ts b/tools/server/webui/src/lib/types/models.d.ts
new file mode 100644
index 0000000000000..3b6bad5f0feae
--- /dev/null
+++ b/tools/server/webui/src/lib/types/models.d.ts
@@ -0,0 +1,11 @@
+import type { ApiModelDataEntry, ApiModelDetails } from '$lib/types/api';
+
+export interface ModelOption {
+    id: string;
+    name: string;
+    model: string;
+    description?: string;
+    capabilities: string[];
+    details?: ApiModelDetails['details'];
+    meta?: ApiModelDataEntry['meta'];
+}
diff --git a/tools/server/webui/src/lib/types/settings.d.ts b/tools/server/webui/src/lib/types/settings.d.ts
index 4311f779ad841..659fb0c7d1cf5 100644
--- a/tools/server/webui/src/lib/types/settings.d.ts
+++ b/tools/server/webui/src/lib/types/settings.d.ts
@@ -41,6 +41,7 @@ export interface SettingsChatServiceOptions {
     // Callbacks
     onChunk?: (chunk: string) => void;
     onReasoningChunk?: (chunk: string) => void;
+    onModel?: (model: string) => void;
     onComplete?: (response: string, reasoningContent?: string, timings?: ChatMessageTimings) => void;
     onError?: (error: Error) => void;
 }
diff --git a/tools/server/webui/src/lib/utils/conversation-utils.ts b/tools/server/webui/src/lib/utils/conversation-utils.ts
new file mode 100644
index 0000000000000..aee244a08055e
--- /dev/null
+++ b/tools/server/webui/src/lib/utils/conversation-utils.ts
@@ -0,0 +1,30 @@
+/**
+ * Utility functions for conversation data manipulation
+ */
+
+/**
+ * Creates a map of conversation IDs to their message counts from exported conversation data
+ * @param exportedData - Array of exported conversations with their messages
+ * @returns Map of conversation ID to message count
+ */
+export function createMessageCountMap(
+    exportedData: Array<{ conv: DatabaseConversation; messages: DatabaseMessage[] }>
+): Map<string, number> {
+    const countMap = new Map<string, number>();
+
+    for (const item of exportedData) {
+        countMap.set(item.conv.id, item.messages.length);
+    }
+
+    return countMap;
+}
+
+/**
+ * Gets the message count for a specific conversation from the count map
+ * @param conversationId - The ID of the conversation
+ * @param countMap - Map of conversation IDs to message counts
+ * @returns The message count, or 0 if not found
+ */
+export function getMessageCount(conversationId: string, countMap: Map<string, number>): number {
+    return countMap.get(conversationId) ?? 0;
+}
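A small sketch of how the two conversation helpers compose; the conversation objects below are placeholders cast to the database types, purely to show the shape `createMessageCountMap` expects:

```ts
import { createMessageCountMap, getMessageCount } from '$lib/utils/conversation-utils';

// Placeholder exported-conversation payload with made-up ids.
const exportedData = [
    { conv: { id: 'conv-1' } as DatabaseConversation, messages: [] as DatabaseMessage[] },
    { conv: { id: 'conv-2' } as DatabaseConversation, messages: [{} as DatabaseMessage] }
];

const counts = createMessageCountMap(exportedData);

getMessageCount('conv-1', counts); // 0
getMessageCount('conv-2', counts); // 1
getMessageCount('missing', counts); // 0 (fallback for unknown ids)
```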
diff --git a/tools/server/webui/src/lib/utils/is-ime-composing.ts b/tools/server/webui/src/lib/utils/is-ime-composing.ts
new file mode 100644
index 0000000000000..9182ea4f3603f
--- /dev/null
+++ b/tools/server/webui/src/lib/utils/is-ime-composing.ts
@@ -0,0 +1,5 @@
+export function isIMEComposing(event: KeyboardEvent) {
+    // Check for IME composition using isComposing property and keyCode 229 (specifically for IME composition on Safari, which is notorious for not supporting KeyboardEvent.isComposing)
+    // This prevents form submission when confirming IME word selection (e.g., Japanese/Chinese input)
+    return event.isComposing || event.keyCode === 229;
+}
diff --git a/tools/server/webui/src/lib/utils/model-names.test.ts b/tools/server/webui/src/lib/utils/model-names.test.ts
new file mode 100644
index 0000000000000..e19e92f777092
--- /dev/null
+++ b/tools/server/webui/src/lib/utils/model-names.test.ts
@@ -0,0 +1,44 @@
+import { describe, expect, it } from 'vitest';
+import { isValidModelName, normalizeModelName } from './model-names';
+
+describe('normalizeModelName', () => {
+    it('extracts filename from forward slash path', () => {
+        expect(normalizeModelName('models/model-name-1')).toBe('model-name-1');
+        expect(normalizeModelName('path/to/model/model-name-2')).toBe('model-name-2');
+    });
+
+    it('extracts filename from backslash path', () => {
+        expect(normalizeModelName('C\\Models\\model-name-1')).toBe('model-name-1');
+        expect(normalizeModelName('path\\to\\model\\model-name-2')).toBe('model-name-2');
+    });
+
+    it('handles mixed path separators', () => {
+        expect(normalizeModelName('path/to\\model/model-name-2')).toBe('model-name-2');
+    });
+
+    it('returns simple names as-is', () => {
+        expect(normalizeModelName('simple-model')).toBe('simple-model');
+        expect(normalizeModelName('model-name-2')).toBe('model-name-2');
+    });
+
+    it('trims whitespace', () => {
+        expect(normalizeModelName(' model-name ')).toBe('model-name');
+    });
+
+    it('returns empty string for empty input', () => {
+        expect(normalizeModelName('')).toBe('');
+        expect(normalizeModelName(' ')).toBe('');
+    });
+});
+
+describe('isValidModelName', () => {
+    it('returns true for valid names', () => {
+        expect(isValidModelName('model')).toBe(true);
+        expect(isValidModelName('path/to/model.bin')).toBe(true);
+    });
+
+    it('returns false for empty values', () => {
+        expect(isValidModelName('')).toBe(false);
+        expect(isValidModelName(' ')).toBe(false);
+    });
+});
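A sketch of the intended call site for `isIMEComposing`; the handler name and the submit callback are illustrative, not the actual component code:

```ts
import { isIMEComposing } from '$lib/utils/is-ime-composing';

// Only submit on Enter when the user is not confirming an IME word selection.
function handleKeydown(event: KeyboardEvent, submit: () => void) {
    if (event.key === 'Enter' && !event.shiftKey && !isIMEComposing(event)) {
        event.preventDefault();
        submit();
    }
}
```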
diff --git a/tools/server/webui/src/lib/utils/model-names.ts b/tools/server/webui/src/lib/utils/model-names.ts
new file mode 100644
index 0000000000000..b1ea9d95361e6
--- /dev/null
+++ b/tools/server/webui/src/lib/utils/model-names.ts
@@ -0,0 +1,39 @@
+/**
+ * Normalizes a model name by extracting the filename from a path.
+ *
+ * Handles both forward slashes (/) and backslashes (\) as path separators.
+ * If the model name is just a filename (no path), returns it as-is.
+ *
+ * @param modelName - The model name or path to normalize
+ * @returns The normalized model name (filename only)
+ *
+ * @example
+ * normalizeModelName('models/llama-3.1-8b') // Returns: 'llama-3.1-8b'
+ * normalizeModelName('C:\\Models\\gpt-4') // Returns: 'gpt-4'
+ * normalizeModelName('simple-model') // Returns: 'simple-model'
+ * normalizeModelName(' spaced ') // Returns: 'spaced'
+ * normalizeModelName('') // Returns: ''
+ */
+export function normalizeModelName(modelName: string): string {
+    const trimmed = modelName.trim();
+
+    if (!trimmed) {
+        return '';
+    }
+
+    const segments = trimmed.split(/[\\/]/);
+    const candidate = segments.pop();
+    const normalized = candidate?.trim();
+
+    return normalized && normalized.length > 0 ? normalized : trimmed;
+}
+
+/**
+ * Validates if a model name is valid (non-empty after normalization).
+ *
+ * @param modelName - The model name to validate
+ * @returns true if valid, false otherwise
+ */
+export function isValidModelName(modelName: string): boolean {
+    return normalizeModelName(modelName).length > 0;
+}
diff --git a/tools/server/webui/src/lib/utils/portal-to-body.ts b/tools/server/webui/src/lib/utils/portal-to-body.ts
new file mode 100644
index 0000000000000..bffbe89006948
--- /dev/null
+++ b/tools/server/webui/src/lib/utils/portal-to-body.ts
@@ -0,0 +1,20 @@
+export function portalToBody(node: HTMLElement) {
+    if (typeof document === 'undefined') {
+        return;
+    }
+
+    const target = document.body;
+    if (!target) {
+        return;
+    }
+
+    target.appendChild(node);
+
+    return {
+        destroy() {
+            if (node.parentNode === target) {
+                target.removeChild(node);
+            }
+        }
+    };
+}
diff --git a/tools/server/webui/src/routes/+layout.svelte b/tools/server/webui/src/routes/+layout.svelte
index 8912f642ceffc..075bdd356bc99 100644
--- a/tools/server/webui/src/routes/+layout.svelte
+++ b/tools/server/webui/src/routes/+layout.svelte
@@ -165,10 +165,10 @@
diff --git a/tools/server/webui/src/routes/+page.svelte b/tools/server/webui/src/routes/+page.svelte
index 2cd2d5c37313a..cd18dabccb9de 100644
--- a/tools/server/webui/src/routes/+page.svelte
+++ b/tools/server/webui/src/routes/+page.svelte
@@ -2,6 +2,9 @@
     import { ChatScreen } from '$lib/components/app';
     import { chatStore, isInitialized } from '$lib/stores/chat.svelte';
     import { onMount } from 'svelte';
+    import { page } from '$app/state';
+
+    let qParam = $derived(page.url.searchParams.get('q'));
 
     onMount(async () => {
         if (!isInitialized) {
@@ -9,6 +12,11 @@
         }
 
         chatStore.clearActiveConversation();
+
+        if (qParam !== null) {
+            await chatStore.createConversation();
+            await chatStore.sendMessage(qParam);
+        }
     });
diff --git a/tools/server/webui/src/routes/chat/[id]/+page.svelte b/tools/server/webui/src/routes/chat/[id]/+page.svelte
index 5b6c73d6d4796..af91a8e9ef755 100644
--- a/tools/server/webui/src/routes/chat/[id]/+page.svelte
+++ b/tools/server/webui/src/routes/chat/[id]/+page.svelte
@@ -1,45 +1,26 @@
diff --git a/tools/server/webui/svelte.config.js b/tools/server/webui/svelte.config.js
index c24f879ddaf42..f25494236bddd 100644
--- a/tools/server/webui/svelte.config.js
+++ b/tools/server/webui/svelte.config.js
@@ -7,6 +7,7 @@ const config = {
     // Consult https://svelte.dev/docs/kit/integrations
     // for more information about preprocessors
     preprocess: [vitePreprocess(), mdsvex()],
+
     kit: {
         paths: {
             relative: true
@@ -23,6 +24,7 @@
             bundleStrategy: 'inline'
         }
     },
+
     extensions: ['.svelte', '.svx']
 };
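`portalToBody` follows the Svelte action contract (optionally returning `destroy`), so components would normally attach it with `use:portalToBody`; a plain TypeScript sketch of the same lifecycle, outside any component, would look roughly like this:

```ts
import { portalToBody } from '$lib/utils/portal-to-body';

// Move a detached overlay element under <body> so it escapes overflow/stacking contexts.
const overlay = document.createElement('div');
overlay.textContent = 'rendered under document.body';

const action = portalToBody(overlay);

// ...on teardown, remove the node again (action is undefined outside the browser).
action?.destroy();
```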
diff --git a/tools/server/webui/vite.config.ts b/tools/server/webui/vite.config.ts
index 7f7ce3bed3fcc..b077e232ab043 100644
--- a/tools/server/webui/vite.config.ts
+++ b/tools/server/webui/vite.config.ts
@@ -75,7 +75,12 @@ function llamaCppBuildPlugin() {
 }
 
 export default defineConfig({
+    build: {
+        chunkSizeWarningLimit: 3072
+    },
+
     plugins: [tailwindcss(), sveltekit(), devtoolsJson(), llamaCppBuildPlugin()],
+
     test: {
         projects: [
             {
@@ -123,6 +128,7 @@
             }
         ]
     },
+
     server: {
         proxy: {
             '/v1': 'http://localhost:8080/',