Add initial support for RamaLama

This commit is contained in:
eleiton
2025-12-01 23:20:55 +01:00
parent e7afd3f671
commit e04b83d39d
2 changed files with 37 additions and 0 deletions

View File

@@ -0,0 +1,30 @@
# Compose definition for RamaLama with Intel GPU (SYCL / Level Zero) support.
# The obsolete top-level `version:` key has been dropped — Compose v2 ignores
# it and emits a warning when present.
services:
  ramalama:
    build:
      context: ./ramalama
      dockerfile: Dockerfile
    image: ramalama-ipex:local
    container_name: ramalama-ipex
    init: true                     # run an init as PID 1 (zombie reaping, signal forwarding)
    devices:
      - /dev/dri:/dev/dri          # pass the Intel GPU render/device nodes through
    security_opt:
      - seccomp=unconfined         # NOTE(review): very broad — confirm the GPU stack actually requires it
    volumes:
      - ramalama-models:/var/lib/ramalama/store   # persist downloaded models across restarts
      - /tmp:/tmp:rw
    environment:
      # Values quoted: the Compose spec expects environment values as strings,
      # and bare numerics trip strict validators.
      SYCL_DEVICE_FILTER: "level_zero:gpu:0"   # select the first Level Zero GPU
      SYCL_CACHE_PERSISTENT: "1"
      ZES_ENABLE_SYSMAN: "1"
      RAMALAMA_NO_CONTAINER: "true"   # NOTE(review): NO_CONTAINER and IN_CONTAINER are both
      RAMALAMA_IN_CONTAINER: "true"   # "true" — looks contradictory; verify which one ramalama reads
    ports:
      # Quoted to sidestep YAML 1.1 base-60 parsing of colon-separated numbers
      # (the Compose docs recommend always quoting short-syntax port mappings).
      - "11434:8080"                 # host 11434 (Ollama-compatible) -> container 8080
    restart: unless-stopped
    tty: true

volumes:
  ramalama-models: {}

7
ramalama/Dockerfile Normal file
View File

@@ -0,0 +1,7 @@
# syntax=docker/dockerfile:1
# RamaLama on Intel GPU. The RUN --mount cache below requires BuildKit, so
# the syntax directive above pins a frontend that provides it.

# NOTE(review): :latest is not reproducible — pin a tag or digest once a
# known-good one is identified.
FROM quay.io/ramalama/intel-gpu:latest

# Cache downloaded RPMs in a build-cache mount so rebuilds skip re-downloading.
# keepcache=1 is required: dnf deletes packages after install by default,
# which would empty the mount. `dnf clean all` is deliberately omitted — it
# would likewise wipe the cache mount, and the mount contents never land in
# the image layer, so there is nothing to clean up for image size.
RUN --mount=type=cache,target=/var/cache/dnf \
    dnf install -y --setopt=keepcache=1 ramalama

# Keep the container alive; ramalama commands are exec'd into it by the
# compose service (exec form so `sleep` is PID-managed by the init).
CMD ["sleep", "infinity"]