diff --git a/README.md b/README.md index e5f9280..2c2525f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ollama-intel-gpu -This repo illlustrates the use of Ollama with support for Intel ARC GPU based via SYCL. Run the recently released [Meta llama3.1](https://llama.meta.com/) or [Microsoft phi3](https://news.microsoft.com/source/features/ai/the-phi-3-small-language-models-with-big-potential) models on your local Intel ARC GPU based PC using Linux or Windows WSL2. +This repo illustrates the use of Ollama with support for Intel ARC GPUs via ipex-llm. Run the recently released [deepseek-r1](https://github.com/deepseek-ai/DeepSeek-R1) model on your local Intel ARC GPU-based PC using Linux or Windows WSL2. ## Screenshot ![screenshot](doc/screenshot.png) diff --git a/docker-compose-wsl2.yml b/docker-compose-wsl2.yml index 99f9228..7efe6c6 100644 --- a/docker-compose-wsl2.yml +++ b/docker-compose-wsl2.yml @@ -17,7 +17,7 @@ services: environment: - DISPLAY=${DISPLAY} ollama-webui: - image: ghcr.io/open-webui/open-webui:v0.3.35 + image: ghcr.io/open-webui/open-webui container_name: ollama-webui volumes: - ollama-webui:/app/backend/data diff --git a/docker-compose.yml b/docker-compose.yml index 84ca524..f06a16d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,7 @@ services: environment: - DISPLAY=${DISPLAY} ollama-webui: - image: ghcr.io/open-webui/open-webui:v0.3.35 + image: ghcr.io/open-webui/open-webui container_name: ollama-webui volumes: - ollama-webui:/app/backend/data