Update to latest ipex-llm dockerfile 20250211
Dockerfile  (+4, -6)
@@ -1,12 +1,10 @@
-FROM intelanalytics/ipex-llm-inference-cpp-xpu:2.2.0-SNAPSHOT
-
-ENV ZES_ENABLE_SYSMAN=1
-ENV OLLAMA_HOST=0.0.0.0:11434
+FROM intelanalytics/ipex-llm-inference-cpp-xpu:latest
 
 RUN mkdir -p /llm/ollama; \
     cd /llm/ollama; \
     init-ollama;
 
 WORKDIR /llm/ollama
-
-ENTRYPOINT ["./ollama", "serve"]
+COPY commands.sh /llm/ollama/commands.sh
+RUN ["chmod", "+x", "/llm/ollama/commands.sh"]
+ENTRYPOINT ["/llm/ollama/commands.sh"]
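Net effect: the base image moves from the pinned 2.2.0-SNAPSHOT tag to latest, and the two ENV lines disappear — OLLAMA_HOST is now exported by the new commands.sh entrypoint at container start, while ZES_ENABLE_SYSMAN is presumably covered by ipex-llm-init. A minimal sketch of building and running the image by hand, outside compose (the tag, DEVICE value, and port mapping are illustrative assumptions, not part of this commit):

  docker build -t ollama-intel-gpu:latest .
  docker run --rm -it \
    --device /dev/dri \
    -e DEVICE=Arc \
    -p 11434:11434 \
    ollama-intel-gpu:latest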
commands.sh  (new file, +5)
@@ -0,0 +1,5 @@
+#!/bin/bash
+source ipex-llm-init --gpu --device $DEVICE
+export OLLAMA_HOST=0.0.0.0:11434
+cd /llm/ollama
+./ollama serve
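commands.sh sources ipex-llm-init, which sets the GPU-related environment variables for the device family named by $DEVICE, and then launches ollama serve. The script dereferences $DEVICE unguarded, so a defensive variant might look like this sketch (the Arc fallback and the exec are assumptions, not part of the commit):

  #!/bin/bash
  # Default to Arc when the container is started without -e DEVICE=...
  : "${DEVICE:=Arc}"
  source ipex-llm-init --gpu --device "$DEVICE"
  export OLLAMA_HOST=0.0.0.0:11434
  cd /llm/ollama || exit 1
  # exec replaces the shell so ollama serve runs as PID 1 and receives stop signals
  exec ./ollama serve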
docker-compose.yml  (+1, -4)
@@ -1,19 +1,16 @@
 version: "3.9"
 services:
   ollama-intel-gpu:
     build:
       context: .
       dockerfile: Dockerfile
     container_name: ollama-intel-gpu
     image: ollama-intel-gpu:latest
     restart: always
     devices:
       - /dev/dri:/dev/dri
     volumes:
       - /tmp/.X11-unix:/tmp/.X11-unix
       - ollama-intel-gpu:/root/.ollama
     environment:
       - DISPLAY=${DISPLAY}
+      - DEVICE=Arc
   ollama-webui:
     image: ghcr.io/open-webui/open-webui
     container_name: ollama-webui
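The new DEVICE=Arc entry is what feeds $DEVICE in commands.sh. The compose diff is truncated after the ollama-webui service; assuming the rest of the file is unchanged, a quick smoke test after rebuilding (ollama list runs inside the container via compose exec, so it works even if port 11434 is not published on the host):

  docker compose up -d --build
  docker compose exec ollama-intel-gpu ./ollama list
  docker compose logs -f ollama-intel-gpu    # watch the ipex-llm-init and 'ollama serve' output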