Releasing first version of the solution

eleiton
2024-09-29 00:47:39 +02:00
parent 94ef2044c3
commit 5dd7e9a28c
5 changed files with 158 additions and 1 deletion

Dockerfile Normal file

@@ -0,0 +1,69 @@
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
ENV PIP_BREAK_SYSTEM_PACKAGES=1
ENV OLLAMA_NUM_GPU=999
ENV OLLAMA_HOST=0.0.0.0:11434
# Install base packages
RUN apt update && \
apt install --no-install-recommends -q -y \
wget \
gnupg \
ca-certificates \
python3-pip \
pkg-config \
build-essential \
python3-dev \
cmake
# Install IPEX-LLM on Linux with Intel GPU
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg && \
echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | \
tee /etc/apt/sources.list.d/intel-gpu-jammy.list && \
apt update && \
apt install --no-install-recommends -q -y \
udev \
level-zero \
libigdgmm12 \
intel-level-zero-gpu \
intel-opencl-icd
# Install OneAPI packages
RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | \
gpg --dearmor --output /usr/share/keyrings/oneapi-archive-keyring.gpg && \
echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | \
tee /etc/apt/sources.list.d/oneAPI.list && \
apt update && \
apt install --no-install-recommends -q -y \
intel-oneapi-common-vars \
intel-oneapi-common-oneapi-vars \
intel-oneapi-diagnostics-utility \
intel-oneapi-compiler-dpcpp-cpp \
intel-oneapi-dpcpp-ct \
intel-oneapi-mkl \
intel-oneapi-mkl-devel \
intel-oneapi-mpi \
intel-oneapi-mpi-devel \
intel-oneapi-dal \
intel-oneapi-dal-devel \
intel-oneapi-ippcp \
intel-oneapi-ippcp-devel \
intel-oneapi-ipp \
intel-oneapi-ipp-devel \
intel-oneapi-tlt \
intel-oneapi-ccl \
intel-oneapi-ccl-devel \
intel-oneapi-dnnl-devel \
intel-oneapi-dnnl \
intel-oneapi-tcm-1.0
# Install serve.sh script
COPY ./scripts/serve.sh /usr/share/lib/serve.sh
# Install ipex-llm[cpp] using pip
RUN pip install --pre --upgrade "ipex-llm[cpp]"
# Set entrypoint to run the serve.sh script
ENTRYPOINT ["/bin/bash", "/usr/share/lib/serve.sh"]
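
Although the compose file in this commit builds this image automatically, it can also be built and run on its own; a minimal sketch using standard Docker commands (the image tag is arbitrary):

```bash
# Build the image from the repository root.
docker build -t ollama-ipex-llm .

# Run it with the Intel GPU device nodes passed through and the Ollama port published.
docker run --rm --device /dev/dri -p 11434:11434 ollama-ipex-llm
```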

README.md

@@ -1,2 +1,46 @@
# ollama-intel-arc
Make use of Intel Arc Series GPU to Run Ollama with Open WebUI to interact with Large Language Models (LLM)
A Docker-based setup for running Ollama as a backend and Open WebUI as a frontend, leveraging Intel Arc Series GPUs on Linux systems.
## Overview
This repository provides a convenient way to run Ollama as a backend and Open WebUI as a frontend, allowing you to interact with Large Language Models (LLMs) using an Intel Arc Series GPU on your Linux system.
![screenshot](resources/open-webui.png)
## Services
1. Ollama
* Runs llama.cpp and Ollama with IPEX-LLM on your Linux computer with an Intel GPU.
* Built following the guidelines from [Intel](https://github.com/intel-analytics/ipex-llm/blob/main/docs/mddocs/Quickstart/llama_cpp_quickstart.md).
* Uses [Ubuntu 24.04 LTS](https://ubuntu.com/blog/tag/ubuntu-24-04-lts), Ubuntu's latest stable release, as the container base image.
* Uses the latest versions of required packages, prioritizing cutting-edge features over stability.
* Exposes port `11434` for connecting other tools to your Ollama service.
* To validate this setup, run: `curl http://localhost:11434/` (see the fuller API example after this list).
2. Open WebUI
* The official distribution of Open WebUI.
* `WEBUI_AUTH` is turned off for authentication-free usage.
* `ENABLE_OPENAI_API` and `ENABLE_OLLAMA_API` are set to off and on, respectively, so all interactions go through Ollama.
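Beyond the health check above, the Ollama REST API on port `11434` can be exercised directly; a minimal sketch, assuming `llama3.2` as an example model name (any model from the Ollama library works):

```bash
# Pull a model into the Ollama service.
curl http://localhost:11434/api/pull -d '{"name": "llama3.2"}'

# Generate a completion; the response streams back as JSON lines.
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "prompt": "Why is the sky blue?"
}'
```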
## Setup
### Fedora
```bash
$ git clone https://github.com/eleiton/ollama-intel-arc.git
$ cd ollama-intel-arc
$ podman compose up
```
### Others (Ubuntu 24.04 or newer)
```bash
$ git clone https://github.com/eleiton/ollama-intel-arc.git
$ cd ollama-intel-arc
$ docker compose up
```
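Whichever runtime you use, the containers need access to the host's GPU device nodes. A quick sanity check before and after bringing the stack up (substitute `podman` for `docker` on Fedora):

```bash
# Confirm the Intel GPU device nodes exist on the host.
ls -l /dev/dri

# Follow the Ollama container logs to confirm the service starts cleanly.
docker compose logs -f ollama-ipex-llm
```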
## Usage
* Run the services using the setup instructions above.
* Open your web browser and go to http://localhost:3000 to access the Open WebUI interface.
* For more information on using Open WebUI, refer to the official documentation at https://docs.openwebui.com/.
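To confirm that inference actually runs on the Arc GPU, watch utilization on the host while a prompt is being answered; this assumes `intel_gpu_top` from your distribution's GPU tools package is available:

```bash
# Install the monitoring tool (package name varies by distro).
sudo apt install intel-gpu-tools   # Fedora: sudo dnf install igt-gpu-tools

# Watch the render/compute engines while a prompt is answered in Open WebUI.
sudo intel_gpu_top
```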
## References
* [Intel guidelines for installing Linux GPU support](https://github.com/intel-analytics/ipex-llm/blob/main/docs/mddocs/Quickstart/install_linux_gpu.md)
* [Open WebUI documentation](https://docs.openwebui.com/)

docker-compose.yml Normal file

@@ -0,0 +1,34 @@
version: "3.8"
services:
ollama-ipex-llm:
build:
context: .
dockerfile: Dockerfile
container_name: ollama-ipex-llm
image: ollama-ipex-llm:latest
restart: unless-stopped
devices:
- /dev/dri:/dev/dri
volumes:
- ollama-volume:/root/.ollama
ports:
- 11434:11434
open-webui:
image: ghcr.io/open-webui/open-webui:latest
container_name: open-webui
volumes:
- open-webui-volume:/app/backend/data
depends_on:
- ollama-ipex-llm
ports:
- 3000:8080
environment:
- WEBUI_AUTH=False
- ENABLE_OPENAI_API=False
- ENABLE_OLLAMA_API=True
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
volumes:
ollama-volume: {}
open-webui-volume: {}
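
As a usage sketch, the compose file can be validated and the stack brought up in the background with standard Compose commands (again, `podman compose` on Fedora):

```bash
# Validate the compose file without starting anything.
docker compose config --quiet

# Build the Ollama image and start both services detached.
docker compose up -d --build

# Confirm both containers are running.
docker compose ps
```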

resources/open-webui.png Normal file

Binary file not shown.

scripts/serve.sh Normal file

@@ -0,0 +1,10 @@
#!/bin/bash
# Load the oneAPI environment (compiler runtime, MKL, Level Zero, etc.).
source /opt/intel/oneapi/setvars.sh
# Environment tuning recommended by Intel's IPEX-LLM quickstart:
export USE_XETLA=OFF
# Enable Level Zero Sysman so GPU memory can be queried.
export ZES_ENABLE_SYSMAN=1
# Persist the SYCL kernel cache across runs to speed up startup.
export SYCL_CACHE_PERSISTENT=1
# Use immediate command lists in the Level Zero backend.
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
# Pin inference to the first Level Zero (Intel GPU) device.
export ONEAPI_DEVICE_SELECTOR=level_zero:0
# Start the Ollama server bundled with ipex-llm[cpp].
/usr/local/lib/python3.12/dist-packages/bigdl/cpp/libs/ollama serve
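
`ONEAPI_DEVICE_SELECTOR=level_zero:0` pins the server to the first Level Zero GPU. On a host with more than one Intel GPU (for example an iGPU plus an Arc card), you can list the visible devices and choose a different index; a sketch, assuming the container name from the compose file and `sycl-ls` from the oneAPI compiler packages installed above:

```bash
# List SYCL devices visible inside the running container.
docker exec ollama-ipex-llm bash -c \
  'source /opt/intel/oneapi/setvars.sh >/dev/null && sycl-ls'

# A line such as "[level_zero:gpu:1] ..." means the Arc card is index 1;
# set ONEAPI_DEVICE_SELECTOR=level_zero:1 in serve.sh to target it.
```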