Merge branch 'main' of github.com:eleiton/ollama-intel-arc
docker-compose.ramalama.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
version: "3.9"

services:
  ramalama:
    build:
      context: ./ramalama
      dockerfile: Dockerfile
    image: ramalama-ipex:local
    container_name: ramalama-ipex
    init: true
    # Pass the Intel GPU render nodes through to the container.
    devices:
      - /dev/dri:/dev/dri
    security_opt:
      - seccomp=unconfined
    volumes:
      # Persist downloaded models across container restarts.
      - ramalama-models:/var/lib/ramalama/store
      - /tmp:/tmp:rw
    environment:
      # Target the first Level Zero GPU and cache compiled SYCL kernels.
      SYCL_DEVICE_FILTER: "level_zero:gpu:0"
      SYCL_CACHE_PERSISTENT: 1
      ZES_ENABLE_SYSMAN: 1
      RAMALAMA_NO_CONTAINER: "true"
      RAMALAMA_IN_CONTAINER: "true"
    ports:
      # Expose the in-container server on the host at Ollama's default port.
      - 11434:8080
    restart: unless-stopped
    tty: true

volumes:
  ramalama-models: {}
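A minimal usage sketch (not part of the commit): assuming this file sits at the repository root, the stack can be built and started with Docker Compose and a model served from inside the container. The model name tinyllama and the /v1/chat/completions path are illustrative; they depend on which model you pull and on RamaLama's serving backend.

# Build the image and start the service in the background.
docker compose -f docker-compose.ramalama.yml up -d --build

# Serve a model from inside the container; host port 11434 maps to
# container port 8080, where `ramalama serve` listens by default.
docker exec -it ramalama-ipex ramalama serve tinyllama

# Query the OpenAI-compatible endpoint from the host (illustrative path).
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "tinyllama", "messages": [{"role": "user", "content": "hello"}]}'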
ramalama/Dockerfile (new file, 7 lines)
@@ -0,0 +1,7 @@
FROM quay.io/ramalama/intel-gpu:latest

# Cache dnf downloads across builds and install ramalama
# on top of the Intel GPU base image.
RUN --mount=type=cache,target=/var/cache/dnf \
    dnf install -y ramalama && \
    dnf clean all

# Keep the container alive; models are run via ramalama exec'd inside.
CMD ["sleep", "infinity"]
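To rebuild just this image and confirm the GPU device nodes are visible inside a container, a quick check (a sketch; the tag matches the image: key in the compose file above):

docker build -t ramalama-ipex:local ./ramalama
docker run --rm --device /dev/dri ramalama-ipex:local ls -l /dev/dri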