Switch GPU backend from Vulkan to SYCL for ~2x inference performance on Intel GPUs
Build ggml-sycl from upstream llama.cpp (commit a5bb8ba4, matching ollama's
vendored ggml) using Intel oneAPI 2025.1.1 in a multi-stage Docker build.

Patch two ollama-specific API divergences via patch-sycl.py: add a
batch_size parameter to graph_compute, and remove the
GGML_TENSOR_FLAG_COMPUTE skip-check that caused all compute nodes to be
bypassed.

Tested: gemma3:1b — 27/27 layers on GPU, 10.2 tok/s generation,
65.3 tok/s prompt eval.

Co-authored-by: Cursor <cursoragent@cursor.com>
Dockerfile (+87 -9)
@@ -1,15 +1,93 @@
+# =============================================================================
+# Stage 1: Build ggml-sycl backend from ollama's ggml source using Intel oneAPI
+# =============================================================================
+FROM intel/oneapi-basekit:2025.1.1-0-devel-ubuntu24.04 AS sycl-builder
+
+ARG OLLAMA_VERSION=0.15.6
+
+# Clone ollama source and the MATCHING ggml-sycl source from upstream llama.cpp.
+# ollama v0.15.6 vendors ggml at commit a5bb8ba4 — we MUST use the same commit
+# to ensure struct layouts, operation enums, and internal APIs match exactly.
+# (ollama excludes ggml-sycl from its vendored ggml, but keeps the header)
+ARG GGML_COMMIT=a5bb8ba4c50257437630c136210396810741bbf7
+RUN git clone --depth 1 --branch v${OLLAMA_VERSION} \
+        https://github.com/ollama/ollama.git /ollama && \
+    git init /tmp/llama.cpp && \
+    cd /tmp/llama.cpp && \
+    git remote add origin https://github.com/ggml-org/llama.cpp.git && \
+    git sparse-checkout set ggml/src/ggml-sycl && \
+    git fetch --depth 1 origin ${GGML_COMMIT} && \
+    git checkout FETCH_HEAD && \
+    cp -r /tmp/llama.cpp/ggml/src/ggml-sycl \
+        /ollama/ml/backend/ggml/ggml/src/ggml-sycl && \
+    rm -rf /tmp/llama.cpp
+
+WORKDIR /ollama
+
+# Patch ggml-sycl to match ollama's modified ggml backend API:
+# 1. graph_compute has an extra int batch_size parameter in ollama
+# 2. GGML_TENSOR_FLAG_COMPUTE doesn't exist in ollama's ggml
+COPY patch-sycl.py /tmp/patch-sycl.py
+RUN python3 /tmp/patch-sycl.py ml/backend/ggml/ggml/src/ggml-sycl/ggml-sycl.cpp
+
+# Build the SYCL backend as a dynamic library
+# Note: oneAPI env is already set in the base image, no need to source setvars.sh
+RUN cmake -B build \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCMAKE_C_COMPILER=icx \
+        -DCMAKE_CXX_COMPILER=icpx \
+        -DGGML_SYCL=ON \
+        -DGGML_SYCL_TARGET=INTEL \
+        -DOLLAMA_RUNNER_DIR=sycl && \
+    cmake --build build --parallel $(nproc) --target ggml-sycl
+
+# Collect the SYCL runner and its oneAPI runtime dependencies into /sycl-runner
+RUN mkdir -p /sycl-runner && \
+    cp build/lib/ollama/libggml-sycl.so /sycl-runner/ && \
+    # SYCL / DPC++ runtime
+    for f in libsycl.so*; do true; done && \
+    cp /opt/intel/oneapi/compiler/latest/lib/libsycl.so* /sycl-runner/ && \
+    # Unified Runtime (oneAPI 2025+) — search multiple possible locations
+    find /opt/intel/oneapi -name 'libur_loader.so*' | head -3 | xargs -I{} cp {} /sycl-runner/ && \
+    find /opt/intel/oneapi -name 'libur_adapter_level_zero.so*' | head -3 | xargs -I{} cp {} /sycl-runner/ && \
+    find /opt/intel/oneapi -maxdepth 4 -name 'libumf.so*' | head -3 | xargs -I{} cp {} /sycl-runner/ && \
+    # oneDNN
+    cp /opt/intel/oneapi/dnnl/latest/lib/libdnnl.so* /sycl-runner/ 2>/dev/null; \
+    # oneMKL
+    cp /opt/intel/oneapi/mkl/latest/lib/libmkl_core.so* /sycl-runner/ && \
+    cp /opt/intel/oneapi/mkl/latest/lib/libmkl_intel_ilp64.so* /sycl-runner/ && \
+    cp /opt/intel/oneapi/mkl/latest/lib/libmkl_sycl_blas.so* /sycl-runner/ && \
+    cp /opt/intel/oneapi/mkl/latest/lib/libmkl_tbb_thread.so* /sycl-runner/ && \
+    # TBB
+    cp /opt/intel/oneapi/tbb/latest/lib/intel64/gcc*/libtbb.so* /sycl-runner/ && \
+    # Intel compiler runtime
+    cp /opt/intel/oneapi/compiler/latest/lib/libsvml.so /sycl-runner/ && \
+    cp /opt/intel/oneapi/compiler/latest/lib/libimf.so /sycl-runner/ && \
+    cp /opt/intel/oneapi/compiler/latest/lib/libintlc.so* /sycl-runner/ && \
+    cp /opt/intel/oneapi/compiler/latest/lib/libirng.so /sycl-runner/ && \
+    cp /opt/intel/oneapi/compiler/latest/lib/libiomp5.so /sycl-runner/ && \
+    # Level-zero PI plugin (legacy, may not exist)
+    cp /opt/intel/oneapi/compiler/latest/lib/libpi_level_zero.so* /sycl-runner/ 2>/dev/null; \
+    # SYCL SPIR-V fallback kernels (needed for bfloat16, complex math, etc.)
+    cp /opt/intel/oneapi/compiler/latest/lib/libsycl-fallback*.spv /sycl-runner/ && \
+    # Strip debug symbols to reduce size
+    strip --strip-unneeded /sycl-runner/*.so* 2>/dev/null; true
+
+# =============================================================================
+# Stage 2: Runtime image
+# =============================================================================
 FROM ubuntu:24.04
 ENV DEBIAN_FRONTEND=noninteractive \
     TZ=America/Los_Angeles
 
-# Base packages + Intel Vulkan ICD (ANV driver)
+# Base packages
 RUN apt-get update && \
     apt-get install --no-install-recommends -q -y \
     ca-certificates \
     wget \
     zstd \
-    mesa-vulkan-drivers \
-    ocl-icd-libopencl1 && \
+    ocl-icd-libopencl1 \
+    libhwloc15 && \
     rm -rf /var/lib/apt/lists/*
 
 # Intel GPU runtimes (release 26.05.37020.3)
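The pinned sparse checkout above can be replayed outside Docker to inspect exactly what gets copied into ollama's tree; this is a sketch using the same commands the build stage runs, with `${GGML_COMMIT}` inlined:

```shell
git init /tmp/llama.cpp && cd /tmp/llama.cpp
git remote add origin https://github.com/ggml-org/llama.cpp.git
git sparse-checkout set ggml/src/ggml-sycl
git fetch --depth 1 origin a5bb8ba4c50257437630c136210396810741bbf7
git checkout FETCH_HEAD
git rev-parse HEAD        # should print a5bb8ba4c50257437630c136210396810741bbf7
ls ggml/src/ggml-sycl     # the implementation ollama deliberately excludes
```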
@@ -25,12 +103,14 @@ RUN mkdir -p /tmp/gpu && cd /tmp/gpu && \
     wget https://github.com/intel/compute-runtime/releases/download/26.05.37020.3/libze-intel-gpu1_26.05.37020.3-0_amd64.deb && \
     dpkg -i *.deb *.ddeb && rm -rf /tmp/gpu
 
-# Install official ollama (Vulkan runner provides Intel GPU acceleration)
+# Install official ollama binary + CPU runners (skip CUDA/MLX/Vulkan)
 ARG OLLAMA_VERSION=0.15.6
 RUN wget -qO- "https://github.com/ollama/ollama/releases/download/v${OLLAMA_VERSION}/ollama-linux-amd64.tar.zst" | \
     zstd -d | tar -xf - -C /usr && \
-    # Remove CUDA and MLX runners — we only need CPU + Vulkan
-    rm -rf /usr/lib/ollama/cuda_* /usr/lib/ollama/mlx_*
+    rm -rf /usr/lib/ollama/cuda_* /usr/lib/ollama/mlx_* /usr/lib/ollama/vulkan
+
+# Install SYCL runner from build stage
+COPY --from=sycl-builder /sycl-runner/ /usr/lib/ollama/sycl/
 
 # Clean up
 RUN apt-get clean && \
@@ -51,14 +131,12 @@ ENV OLLAMA_MAX_LOADED_MODELS=1
 ENV OLLAMA_MAX_QUEUE=512
 ENV OLLAMA_MAX_VRAM=0
 
-# Enable Vulkan backend for Intel GPU acceleration
-ENV OLLAMA_VULKAN=1
-
 # Use all GPU layers
 ENV OLLAMA_NUM_GPU=999
 
 # Intel GPU tuning
 ENV ZES_ENABLE_SYSMAN=1
+ENV ONEAPI_DEVICE_SELECTOR=level_zero:0
 
 # For Intel Core Ultra Processors (Series 1), code name Meteor Lake
 ENV IPEX_LLM_NPU_MTL=1
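The runtime stage installs no oneAPI packages, so every library `libggml-sycl.so` links against must travel in `/usr/lib/ollama/sycl/`. A minimal sketch for verifying this in a built image, assuming a hypothetical tag `ollama-intel-gpu:sycl`:

```shell
docker run --rm --entrypoint /bin/sh ollama-intel-gpu:sycl -c \
  'LD_LIBRARY_PATH=/usr/lib/ollama/sycl \
   ldd /usr/lib/ollama/sycl/libggml-sycl.so | grep "not found" \
   && echo "missing dependencies" || echo "all dependencies resolved"'
```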
README.md
@@ -2,10 +2,9 @@
 
 [](
 
-This repo illustrates the use of Ollama with support for Intel ARC GPU based via ipex-llm and Ollama Portable ZIP support. Run the recently released [deepseek-r1](https://github.com/deepseek-ai/DeepSeek-R1) model on your local Intel ARC GPU based PC using Linux
-
-> !Note: All Ollama based ipex-llm defects should be reported directly to the ipex-llm project at https://github.com/intel/ipex-llm
+Run LLM models on your local Intel GPU using Ollama with Docker.
+Includes [Open WebUI](https://github.com/open-webui/open-webui) for a
+browser-based chat interface.
 
 ## Screenshot
 
@@ -13,15 +12,11 @@ This repo illustrates the use of Ollama with support for Intel ARC GPU based via
 
 ## Prerequisites
 
-* Ubuntu 24.04 or newer (for Intel ARC GPU kernel driver support. Tested with Ubuntu 24.04.02
-* Installed Docker and Docker-compose tools
-* Intel ARC series GPU (tested with Intel ARC A770 16GB and Intel(R) Core(TM) Ultra 5 155H integrated GPU)
+* Ubuntu 24.04 or newer
+* Docker and Docker Compose
+* Intel GPU (tested with Intel Core Ultra 7 155H integrated Arc Graphics — Meteor Lake)
 
-# Usage
+## Quick start
 
-The following will build the Ollama with Intel ARC GPU support, and compose those with the public docker image based on OpenWEB UI from https://github.com/open-webui/open-webui
-
-## Linux
-
 ```shell
 git clone https://github.com/mattcurf/ollama-intel-gpu
@@ -29,29 +24,101 @@ cd ollama-intel-gpu
 docker compose up
 ```
 
-> !NOTE
-> If you have multiple GPU's installed (like integrated and discrete), set the ONEAPI_DEVICE_DELECTOR environment variable in the docker compose file to select the intended device to use.
+Then open http://localhost:3000 in your browser.
 
-Then launch your web browser to http://localhost:3000 to launch the web ui. Create a local OpenWeb UI credential, then click the settings icon in the top right of the screen, then select 'Models', then click 'Show', then download a model like 'llama3.1:8b-instruct-q8_0' for Intel ARC A770 16GB VRAM
+> If you have multiple GPUs (integrated + discrete), set
+> `ONEAPI_DEVICE_SELECTOR=level_zero:0` in the docker-compose environment
+> to select the intended device.
 
-### Custom `start-ollama.sh` entrypoint
+## GPU backend: SYCL vs Vulkan
 
-The upstream IPEX-LLM portable zip ships a `start-ollama.sh` that hardcodes
-`OLLAMA_HOST=127.0.0.1` and `OLLAMA_KEEP_ALIVE=10m`, preventing the container
-from accepting connections via Docker port mapping and ignoring Compose
-environment overrides.
+Ollama can accelerate inference on Intel GPUs via two backends.
+This repo defaults to **SYCL** (built from upstream llama.cpp's ggml-sycl
+with Intel oneAPI) for best Intel GPU performance.
 
-This repo includes a corrected `start-ollama.sh` (mounted read-only into the
-container) that honours environment variables set in `docker-compose.yml`,
-falling back to sensible defaults (`0.0.0.0:11434`, `24h`).
+### Performance comparison (llama-2-7b Q4_0, llama.cpp benchmarks)
 
-### Update to the latest IPEX-LLM Portable Zip Version
+| Intel GPU            | Vulkan tok/s | SYCL tok/s | SYCL advantage |
+|----------------------|--------------|------------|----------------|
+| MTL iGPU (155H)      | ~8-11        | **16**     | +45-100%       |
+| ARL-H iGPU           | ~10-12       | **17**     | +40-70%        |
+| Arc A770             | ~30-35       | **55**     | +57-83%        |
+| Flex 170             | ~30-35       | **50**     | +43-67%        |
+| Data Center Max 1550 | —            | **73**     | —              |
 
-To update to the latest portable zip version of IPEX-LLM's Ollama, update the compose file with the build arguments shown below, using the latest `ollama-*.tgz` release from https://github.com/intel/ipex-llm/releases/tag/v2.3.0-nightly , then rebuild the image.
+### Why SYCL is faster
+
+* **oneDNN** — Intel's Deep Neural Network Library for optimized GEMM (matrix multiply)
+* **oneMKL** — Intel Math Kernel Library for optimized math operations
+* **Level-zero direct access** — lower-overhead GPU communication than Vulkan
+* **Intel-specific MUL_MAT kernels** — hand-tuned for MTL, ARL, Arc, Flex, PVC architectures
+* **FP16 compute path** — optional `GGML_SYCL_F16=ON` for faster compute
+* **Multi-GPU support** — `--split-mode layer` across multiple Intel GPUs
+
+### Why you might still use Vulkan
+
+* Shipped in official ollama releases — no build step required
+* Cross-vendor (Intel, AMD, NVIDIA)
+* Simpler deployment, smaller image
+
+To switch to Vulkan, see the `Dockerfile.vulkan` (if provided) or use the
+official ollama Docker image with `OLLAMA_VULKAN=1`.
+
+## Architecture
+
+The Docker image builds in two stages:
+
+1. **Build stage** (`intel/oneapi-basekit:2025.1.1`) — clones ollama v0.15.6
+   source, fetches the matching `ggml-sycl` backend from upstream llama.cpp
+   (commit `a5bb8ba4`, the exact ggml version ollama vendors), patches two
+   ollama-specific API divergences (`batch_size` parameter, `GGML_TENSOR_FLAG_COMPUTE`
+   removal), and compiles `libggml-sycl.so` with `icpx` + oneAPI.
+2. **Runtime stage** (`ubuntu:24.04`) — minimal image with Intel GPU drivers,
+   the official ollama binary, and the SYCL runner + oneAPI runtime libraries.
+
+### Key components
+
+| Component | Source | Purpose |
+|-----------|--------|---------|
+| ollama binary | Official v0.15.6 release | Go server, API, model management |
+| ggml-sycl backend | llama.cpp @ `a5bb8ba4` | `libggml-sycl.so` compiled with oneAPI |
+| oneAPI runtime | Intel oneAPI 2025.1.1 | SYCL runtime, oneMKL, oneDNN, TBB |
+| GPU drivers | Intel compute-runtime 26.05 | Level-zero, IGC, OpenCL ICD |
+| patch-sycl.py | This repo | Patches ggml-sycl for ollama API compat |
+| Web UI | Open WebUI | Browser-based chat interface |
+
+## Configuration
+
+Key environment variables in `docker-compose.yml`:
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `OLLAMA_HOST` | `0.0.0.0` | Listen address |
+| `OLLAMA_KEEP_ALIVE` | `24h` | Keep models loaded in memory |
+| `OLLAMA_NUM_PARALLEL` | `1` | Parallel request handling |
+| `OLLAMA_MAX_LOADED_MODELS` | `1` | Max models in memory |
+| `ONEAPI_DEVICE_SELECTOR` | `level_zero:0` | Select Intel GPU device |
+
+## How the SYCL build works
+
+Ollama intentionally excludes `ggml-sycl` from its vendored ggml source tree
+(it keeps the header `ggml-sycl.h` but not the implementation). This repo
+rebuilds it by:
+
+1. Cloning the ollama source (for the ggml build system and headers)
+2. Fetching `ggml-sycl` from the **exact llama.cpp commit** that ollama
+   vendors (`a5bb8ba4`) to ensure ABI compatibility
+3. Applying two patches via `patch-sycl.py`:
+   - **`graph_compute` signature**: ollama adds an `int batch_size` parameter
+   - **`GGML_TENSOR_FLAG_COMPUTE`**: ollama removes this enum value, so the
+     skip-check in the compute loop must be removed (otherwise ALL nodes
+     get skipped, producing garbage output)
+4. Building with Intel oneAPI `icpx` compiler, linking oneMKL and oneDNN
+
 ## References
 
-* https://dgpu-docs.intel.com/driver/client/overview.html
-* https://github.com/intel/ipex-llm/blob/main/docs/mddocs/Quickstart/llamacpp_portable_zip_gpu_quickstart.md
-* https://github.com/intel/ipex-llm/releases/download/v2.2.0-nightly/ollama-ipex-llm-2.2.0b20250313-ubuntu.tgz
+* [Intel GPU driver installation](https://dgpu-docs.intel.com/driver/client/overview.html)
+* [llama.cpp SYCL backend docs](https://github.com/ggml-org/llama.cpp/blob/master/docs/backend/SYCL.md)
+* [Intel oneAPI base toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html)
+* [ollama GitHub](https://github.com/ollama/ollama)
+* [Open WebUI](https://github.com/open-webui/open-webui)
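The figures quoted in the commit message (27/27 layers offloaded, 10.2 tok/s generation, 65.3 tok/s prompt eval for gemma3:1b) can be re-checked once the stack is up; a sketch, assuming the container keeps the name `ollama-intel-gpu` from docker-compose.yml:

```shell
# --verbose prints prompt eval rate and eval rate (tok/s) after the reply
docker exec -it ollama-intel-gpu ollama run --verbose gemma3:1b "Say hello"

# with OLLAMA_DEBUG=1 the server logs show how many layers were offloaded
docker logs ollama-intel-gpu 2>&1 | grep -iE "offload|sycl" | tail
```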
docker-compose.yml (+4 -3)
@@ -8,8 +8,7 @@ services:
     container_name: ollama-intel-gpu
     restart: unless-stopped
     devices:
-      # - /dev/dri:/dev/dri
-      - /dev/dri/renderD128:/dev/dri/renderD128
+      - /dev/dri:/dev/dri
     volumes:
       - /tmp/.X11-unix:/tmp/.X11-unix
       - ollama-intel-gpu:/root/.ollama
@@ -17,7 +16,9 @@ services:
     environment:
       - DISPLAY=${DISPLAY}
       - OLLAMA_HOST=0.0.0.0
-      - OLLAMA_VULKAN=1
+      - OLLAMA_DEBUG=1
+      - ONEAPI_DEVICE_SELECTOR=level_zero:0
+      - ZES_ENABLE_SYSMAN=1
       - OLLAMA_DEFAULT_KEEPALIVE=6h
       - OLLAMA_KEEP_ALIVE=24h
       - OLLAMA_MAX_LOADED_MODELS=1
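Mapping all of `/dev/dri` means the container sees every render node, and `ONEAPI_DEVICE_SELECTOR` picks which one the SYCL runtime uses. A sketch for finding the right index on a multi-GPU host, assuming `sycl-ls` from the oneAPI basekit image (it is not included in the runtime image):

```shell
# each render node under /dev/dri is one GPU
ls -l /dev/dri/

# enumerate level_zero devices with the indices the selector expects
docker run --rm --device /dev/dri \
  intel/oneapi-basekit:2025.1.1-0-devel-ubuntu24.04 sycl-ls
# then set e.g. ONEAPI_DEVICE_SELECTOR=level_zero:1 in docker-compose.yml
```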
patch-sycl.py (new file, +72)
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+"""
+Patch upstream ggml-sycl to match ollama's modified ggml backend API.
+
+ollama v0.15.6 vendors ggml from llama.cpp commit a5bb8ba4 but makes two
+divergences from upstream:
+
+1. graph_compute() has an extra 'int batch_size' parameter (ollama addition)
+2. GGML_TENSOR_FLAG_COMPUTE enum value is removed from ollama's ggml.h,
+   so the skip-check in the compute loop must be removed entirely
+"""
+
+import re
+import sys
+
+path = sys.argv[1]
+with open(path, "r") as f:
+    src = f.read()
+
+original = src
+
+# 1. Fix graph_compute signature: add 'int batch_size' parameter
+# The function is defined as:
+#   static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+src = re.sub(
+    r'(static\s+(?:enum\s+)?ggml_status\s+ggml_backend_sycl_graph_compute\s*\([^)]*cgraph)\s*\)',
+    r'\1, int batch_size)',
+    src,
+)
+
+# 2. Add GGML_UNUSED(batch_size) inside the function body (after the opening brace)
+src = re.sub(
+    r'(ggml_backend_sycl_graph_compute\([^)]*int\s+batch_size\)\s*\{)',
+    r'\1\n    GGML_UNUSED(batch_size);',
+    src,
+)
+
+# 3. Remove GGML_TENSOR_FLAG_COMPUTE skip-check entirely.
+# In ollama's vendored ggml, this flag doesn't exist (removed from the enum).
+# Since ollama never sets bit 16, ALL nodes would be skipped, producing garbage.
+# The actual code looks like:
+#   if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) {
+#       continue;
+#   }
+src = re.sub(
+    r'\s*if\s*\(\(node->flags\s*&\s*GGML_TENSOR_FLAG_COMPUTE\)\s*==\s*0\)\s*\{\s*continue;\s*\}',
+    '',
+    src,
+)
+
+if src == original:
+    print(f"WARNING: No changes made to {path}", file=sys.stderr)
+    sys.exit(1)
+
+with open(path, "w") as f:
+    f.write(src)
+
+# Verify patches applied
+checks = [
+    ("batch_size parameter", "int batch_size" in src),
+    ("GGML_UNUSED(batch_size)", "GGML_UNUSED(batch_size)" in src),
+    ("GGML_TENSOR_FLAG_COMPUTE removed", "GGML_TENSOR_FLAG_COMPUTE" not in src),
+]
+for name, ok in checks:
+    status = "OK" if ok else "FAILED"
+    print(f"  [{status}] {name}")
+
+if all(ok for _, ok in checks):
+    print(f"Patched {path} successfully")
+else:
+    print(f"ERROR: Some patches failed on {path}", file=sys.stderr)
+    sys.exit(1)
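The build stage invokes this script automatically, but it can also be run by hand against a checkout prepared as in the Dockerfile; on success the self-checks above print:

```shell
python3 patch-sycl.py ml/backend/ggml/ggml/src/ggml-sycl/ggml-sycl.cpp
#   [OK] batch_size parameter
#   [OK] GGML_UNUSED(batch_size)
#   [OK] GGML_TENSOR_FLAG_COMPUTE removed
# Patched ml/backend/ggml/ggml/src/ggml-sycl/ggml-sycl.cpp successfully
```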