---
# Docker Compose stack: Ollama accelerated on an Intel GPU (via IPEX-LLM),
# fronted by the Open WebUI chat interface.
services:
  # Ollama server built locally against Intel's IPEX-LLM portable runtime.
  ollama-intel-gpu:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        # Pin the IPEX-LLM portable release bundled into the image.
        # Update from https://github.com/intel/ipex-llm/releases/tag/v2.2.0-nightly
        IPEXLLM_PORTABLE_ZIP_FILENAME: ollama-ipex-llm-2.2.0b20250313-ubuntu.tgz
    container_name: ollama-intel-gpu
    restart: always
    devices:
      # Pass through the host's Intel GPU render nodes.
      - /dev/dri:/dev/dri
    volumes:
      # Persist downloaded models across container recreation.
      - ollama-intel-gpu:/root/.ollama
    environment:
      # Select the first Level Zero (Intel GPU) device for oneAPI.
      - ONEAPI_DEVICE_SELECTOR=level_zero:0
      # Context window size for IPEX-LLM-served models.
      - IPEX_LLM_NUM_CTX=16384

  # Web front-end; talks to the Ollama service over the compose network.
  ollama-webui:
    image: ghcr.io/open-webui/open-webui
    container_name: ollama-webui
    volumes:
      # Persist WebUI state (users, chats, settings).
      - ollama-webui:/app/backend/data
    depends_on:
      - ollama-intel-gpu
    ports:
      # Host port is configurable via OLLAMA_WEBUI_PORT (default 3000).
      # Quoted so the mapping is always parsed as a string, not a YAML scalar.
      - "${OLLAMA_WEBUI_PORT-3000}:8080"
    environment:
      # Point the WebUI at the Ollama API inside the compose network.
      - OLLAMA_BASE_URL=http://ollama-intel-gpu:11434
    extra_hosts:
      # Allow the container to reach services on the Docker host.
      - host.docker.internal:host-gateway
    restart: unless-stopped

# Named volumes backing the persistent data above.
volumes:
  ollama-webui: {}
  ollama-intel-gpu: {}