ollama-intel-gpu/docker-compose.yml
Commit c56646e7e7 by Andriy Oblivantsev: Switch GPU backend from Vulkan to SYCL for ~2x inference performance on Intel GPUs
Build ggml-sycl from upstream llama.cpp (commit a5bb8ba4, matching ollama's
vendored ggml) using Intel oneAPI 2025.1.1 in a multi-stage Docker build.
Patch two ollama-specific API divergences via patch-sycl.py: added batch_size
parameter to graph_compute, removed GGML_TENSOR_FLAG_COMPUTE skip-check that
caused all compute nodes to be bypassed.

Tested: gemma3:1b — 27/27 layers on GPU, 10.2 tok/s gen, 65.3 tok/s prompt eval.
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-12 17:28:23 +00:00
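
The patch step amounts to rewriting the vendored ggml-sycl sources in place before the SYCL build runs. Below is a minimal sketch of that approach in Python; the source path and the regular expression are illustrative placeholders, not the actual contents of patch-sycl.py.

#!/usr/bin/env python3
"""Illustrative sketch of a pre-build source patcher in the spirit of
patch-sycl.py. The path and regex below are hypothetical stand-ins,
not the patches this repository actually applies."""
import pathlib
import re
import sys

# Hypothetical location of the vendored SYCL backend source.
SRC = pathlib.Path("llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp")

# (description, pattern, replacement) triples. The real script adjusts the
# graph_compute signature and a GGML_TENSOR_FLAG_COMPUTE check; the entry
# here only demonstrates the mechanism.
PATCHES = [
    (
        "add batch_size parameter to graph_compute (example pattern)",
        r"graph_compute\(ggml_backend_t backend, ggml_cgraph \* cgraph\)",
        "graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph, int batch_size)",
    ),
]

def main() -> int:
    text = SRC.read_text()
    for desc, pattern, repl in PATCHES:
        text, count = re.subn(pattern, repl, text)
        if count == 0:
            print(f"patch did not apply: {desc}", file=sys.stderr)
            return 1
        print(f"applied ({count} site(s)): {desc}")
    SRC.write_text(text)
    return 0

if __name__ == "__main__":
    sys.exit(main())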


services:
  ollama-intel-gpu:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        OLLAMA_VERSION: "0.15.6"
    container_name: ollama-intel-gpu
    restart: unless-stopped
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - /tmp/.X11-unix:/tmp/.X11-unix
      - ollama-intel-gpu:/root/.ollama
    shm_size: "16G"
    environment:
      - DISPLAY=${DISPLAY}
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_DEBUG=1
      - ONEAPI_DEVICE_SELECTOR=level_zero:0
      - ZES_ENABLE_SYSMAN=1
      - OLLAMA_DEFAULT_KEEPALIVE=6h
      - OLLAMA_KEEP_ALIVE=24h
      - OLLAMA_MAX_LOADED_MODELS=1
      - OLLAMA_MAX_QUEUE=512
      - OLLAMA_MAX_VRAM=0
      - OLLAMA_NUM_PARALLEL=1
      #- OLLAMA_NOHISTORY=false
      #- OLLAMA_NOPRUNE=false
    ports:
      - 11434:11434
  ollama-webui:
    image: ghcr.io/open-webui/open-webui:latest
    container_name: ollama-webui
    volumes:
      - ./webui/data:/app/backend/data
      # - ollama-webui:/app/backend/data
    depends_on:
      - ollama-intel-gpu
    ports:
      - ${OLLAMA_WEBUI_PORT-3000}:8080
    environment:
      - OLLAMA_BASE_URL=http://ollama-intel-gpu:11434
      - OLLAMA_DEFAULT_KEEPALIVE=6h
      #- OPENAI_API_BASE_URL=
      #- OPENAI_API_KEY=
      #
      # AUTOMATIC1111_BASE_URL="http://localhost:7860"
      - WEBUI_AUTH=False
      - ENABLE_RAG_WEB_SEARCH=True
      # DO NOT TRACK
      - SCARF_NO_ANALYTICS=true
      - DO_NOT_TRACK=true
      - ANONYMIZED_TELEMETRY=false
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

volumes:
  ollama-webui: {}
  ollama-intel-gpu: {}
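
Once the stack is running (docker compose up -d), the throughput figures quoted in the commit message can be sanity-checked against the Ollama API exposed on port 11434. The sketch below uses only the Python standard library and the timing fields Ollama returns (durations in nanoseconds); the host and prompt are example values.

#!/usr/bin/env python3
"""Quick end-to-end check of the SYCL-backed Ollama container: send one
generation request and derive tokens/sec from Ollama's timing fields.
Host, model, and prompt are example values."""
import json
import urllib.request

OLLAMA_URL = "http://localhost:11434/api/generate"

payload = {
    "model": "gemma3:1b",  # the model used in the commit's test run
    "prompt": "Explain SYCL in one sentence.",
    "stream": False,
}

req = urllib.request.Request(
    OLLAMA_URL,
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    result = json.load(resp)

# eval_duration / prompt_eval_duration are reported in nanoseconds.
gen_tps = result["eval_count"] / (result["eval_duration"] / 1e9)
prompt_tps = result["prompt_eval_count"] / (result["prompt_eval_duration"] / 1e9)
print(f"generation: {gen_tps:.1f} tok/s, prompt eval: {prompt_tps:.1f} tok/s")
print(result["response"])

The model must already be pulled (for example with ollama pull gemma3:1b inside the container, or via the /api/pull endpoint). The container logs also report how many layers were offloaded to the GPU when the model loads; OLLAMA_DEBUG=1 in the compose file makes that output more verbose.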