Use Intel's official pre-built ipex-llm-inference-cpp-xpu image instead of building it locally
@@ -1,10 +1,7 @@
 version: '3'
 services:
   ollama-intel-arc:
-    build:
-      context: ipex-llm-inference-cpp-xpu
-      dockerfile: Dockerfile
-    image: ipex-llm-inference-cpp-xpu:latest
+    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
     container_name: ollama-intel-arc
     restart: unless-stopped
     devices:
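For reference, after this change the service definition would look roughly like the sketch below. This is a sketch, not the repository's actual file: the hunk above is cut off before the devices block, so the /dev/dri entry shown here is an assumption (it is the usual device mapping for Intel GPU passthrough).

version: '3'
services:
  ollama-intel-arc:
    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
    container_name: ollama-intel-arc
    restart: unless-stopped
    devices:
      - /dev/dri:/dev/dri   # assumed: expose Intel GPU render nodes to the container

With the pre-built image, updating comes down to "docker compose pull ollama-intel-arc" followed by "docker compose up -d ollama-intel-arc"; no local build step is needed.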