Building the ipex-llm image locally, since Intel removed the pre-built image.
@@ -79,8 +79,8 @@ When using Open WebUI, you should see this partial output in your console, indic
 * (Optional) If you want to stay in the SD.Next UI, feel free to explore (3).

 * For more information on using SD.Next, refer to the official [documentation](https://vladmandic.github.io/sdnext-docs/).
-* Open your web browser to http://localhost:3000 to access the Open WebUI web page.
-* Go to the administrator [settings](http://localhost:3000/admin/settings) page.
+* Open your web browser to http://localhost:4040 to access the Open WebUI web page.
+* Go to the administrator [settings](http://localhost:4040/admin/settings) page.
 * Go to the Image section (1)
 * Make sure all settings look good, and validate them pressing the refresh button (2)
 * (Optional) Save any changes if you made them. (3)
@@ -123,4 +123,7 @@ $ /llm/ollama/ollama -v

## References

* [Open WebUI documentation](https://docs.openwebui.com/)
* [Intel ipex-llm releases](https://github.com/intel/ipex-llm/releases)
* [Docker - Intel ipex-llm tags](https://hub.docker.com/r/intelanalytics/ipex-llm-serving-xpu/tags)
* [Docker - Intel extension for pytorch](https://hub.docker.com/r/intel/intel-extension-for-pytorch/tags)
* [GitHub - Intel ipex-llm tags](https://github.com/intel/ipex-llm/tags)
* [GitHub - Intel extension for pytorch](https://github.com/intel/intel-extension-for-pytorch/tags)
@@ -1,7 +1,10 @@
 version: '3'
 services:
   ollama-intel-arc:
-    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
+    build:
+      context: ipex-llm-inference-cpp-xpu
+      dockerfile: Dockerfile
+    image: ipex-llm-inference-cpp-xpu:latest
     container_name: ollama-intel-arc
     restart: unless-stopped
     devices:
@@ -27,7 +30,7 @@ services:
     depends_on:
       - ollama-intel-arc
     ports:
-      - 3000:8080
+      - 4040:8080
     environment:
       - WEBUI_AUTH=False
      - ENABLE_OPENAI_API=False
@@ -36,9 +39,9 @@ services:
      - IMAGE_GENERATION_ENGINE=automatic1111
      - IMAGE_GENERATION_MODEL=dreamshaper_8
      - IMAGE_SIZE=400x400
-     - IMAGE_STEPS=30
+     - IMAGE_STEPS=8
      - AUTOMATIC1111_BASE_URL=http://sdnext-ipex:7860/
-     - AUTOMATIC1111_CFG_SCALE=9
+     - AUTOMATIC1111_CFG_SCALE=2
      - AUTOMATIC1111_SAMPLER=DPM++ SDE
      - AUTOMATIC1111_SCHEDULER=Karras
     extra_hosts:
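Since the service now declares a `build:` section instead of pulling the retired upstream image, the stack has to be built locally. A minimal sketch, assuming the snippet above is part of a `docker-compose.yml` that sits next to the `ipex-llm-inference-cpp-xpu/` directory and that Docker Compose v2 (`docker compose`) is available:

```bash
# Build the locally defined image and start the stack.
# --build forces a rebuild when the Dockerfile changes.
docker compose up -d --build

# Optionally rebuild only the Ollama service image without cached layers.
docker compose build --no-cache ollama-intel-arc
```

With the standalone v1 binary, substitute `docker-compose` for `docker compose`.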
ipex-llm-inference-cpp-xpu/Dockerfile (new file, 94 lines)
@@ -0,0 +1,94 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Stage 1: Build stage to handle file preparation
FROM ubuntu:22.04 as build

# Copy the files to the build image
COPY ./start-llama-cpp.sh ./start-ollama.sh ./benchmark_llama-cpp.sh /llm/scripts/

# Stage 2: Final image that only includes necessary runtime artifacts
FROM intel/oneapi-basekit:2025.0.2-0-devel-ubuntu22.04

# Copy the scripts from the build stage
COPY --from=build /llm/scripts /llm/scripts/

# Set build arguments for proxy
ARG http_proxy
ARG https_proxy
# Disable pip cache
ARG PIP_NO_CACHE_DIR=false

# Set environment variables
ENV TZ=Asia/Shanghai \
    PYTHONUNBUFFERED=1 \
    SYCL_CACHE_PERSISTENT=1

# Install dependencies and configure the environment
RUN set -eux && \
    #
    # Ensure scripts are executable
    chmod +x /llm/scripts/*.sh && \
    #
    # Configure Intel OneAPI and GPU repositories
    wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
    echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list && \
    chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
    rm /etc/apt/sources.list.d/intel-graphics.list && \
    wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
    echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
    chmod 644 /usr/share/keyrings/intel-graphics.gpg && \
    #
    # Update and install basic dependencies
    apt-get update && \
    apt-get install -y --no-install-recommends \
        curl wget git sudo libunwind8-dev vim less gnupg gpg-agent software-properties-common && \
    #
    # Set timezone
    ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && \
    echo $TZ > /etc/timezone && \
    #
    # Install Python 3.11
    add-apt-repository ppa:deadsnakes/ppa -y && \
    apt-get install -y --no-install-recommends python3.11 python3-pip python3.11-dev python3.11-distutils python3-wheel && \
    rm /usr/bin/python3 && ln -s /usr/bin/python3.11 /usr/bin/python3 && \
    ln -s /usr/bin/python3 /usr/bin/python && \
    #
    # Install pip and essential Python packages
    wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py && \
    python3 get-pip.py && rm get-pip.py && \
    pip install --upgrade requests argparse urllib3 && \
    pip install --pre --upgrade ipex-llm[cpp] && \
    pip install transformers==4.36.2 transformers_stream_generator einops tiktoken && \
    #
    # Remove packages that break the install
    apt-get remove -y libze-dev libze-intel-gpu1 && \
    #
    # Install Intel GPU OpenCL Driver and Compute Runtime
    mkdir -p /tmp/gpu && cd /tmp/gpu && \
    echo "Downloading Intel Compute Runtime (24.52) for Gen12+..." && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb && \
    #
    echo "Downloading Legacy Compute Runtime (24.35) for pre-Gen12 support..." && \
    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-level-zero-gpu-legacy1_1.3.30872.22_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.22/intel-opencl-icd-legacy1_24.35.30872.22_amd64.deb && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-core_1.0.17537.20_amd64.deb && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.20/intel-igc-opencl_1.0.17537.20_amd64.deb && \
    #
    dpkg -i *.deb && rm -rf /tmp/gpu && \
    #
    # Install oneAPI Level Zero Loader
    mkdir /tmp/level-zero && cd /tmp/level-zero && \
    wget https://github.com/oneapi-src/level-zero/releases/download/v1.20.2/level-zero_1.20.2+u22.04_amd64.deb && \
    wget https://github.com/oneapi-src/level-zero/releases/download/v1.20.2/level-zero-devel_1.20.2+u22.04_amd64.deb && \
    dpkg -i *.deb && rm -rf /tmp/level-zero && \
    #
    # Clean up unnecessary dependencies to reduce image size
    find /usr/lib/python3/dist-packages/ -name 'blinker*' -exec rm -rf {} + && \
    rm -rf /root/.cache/Cypress

WORKDIR /llm/
ipex-llm-inference-cpp-xpu/README.md (new file, 194 lines)
@@ -0,0 +1,194 @@
## Run llama.cpp/Ollama/Open-WebUI on an Intel GPU via Docker

### Install Docker

1. Linux Installation

Follow the instructions in this [guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/DockerGuides/docker_windows_gpu.html#linux) to install Docker on Linux.

2. Windows Installation

For Windows installation, refer to this [guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/DockerGuides/docker_windows_gpu.html#install-docker-desktop-for-windows).

#### Setting up Docker on Windows

You need to enable `--net=host`; follow [this guide](https://docs.docker.com/network/drivers/host/#docker-desktop) so that you can easily access the services running in the Docker container. The [v6.1.x WSL kernel](https://learn.microsoft.com/en-us/community/content/wsl-user-msft-kernel-v6#1---building-the-microsoft-linux-kernel-v61x) is recommended; otherwise, you may encounter a blocking issue before the model is loaded onto the GPU.
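If you are unsure which kernel your WSL distribution is running, a quick check from inside WSL is:

```bash
# Print the running kernel version; a 6.1.x kernel matches the recommendation above.
uname -r
```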
### Build the Image

To build the `ipex-llm-inference-cpp-xpu` Docker image, use the following command:

```bash
docker build \
  --build-arg http_proxy=.. \
  --build-arg https_proxy=.. \
  --build-arg no_proxy=.. \
  --rm --no-cache -t intelanalytics/ipex-llm-inference-cpp-xpu:latest .
```
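If you are not behind a proxy, the `--build-arg` proxy settings can simply be omitted. A minimal sketch, assuming the command is run from the `ipex-llm-inference-cpp-xpu/` directory and using the local tag that the compose file above expects:

```bash
# Build the image and tag it the way the compose file references it.
docker build --rm -t ipex-llm-inference-cpp-xpu:latest .
```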
### Start Docker Container

To map the `xpu` into the container, you need to specify `--device=/dev/dri` when booting the container. Select the device type you are running (Max, Flex, Arc, or iGPU), and change `/path/to/models` to the directory where your models are mounted. `bench_model` is used for quick benchmarking; if you want to benchmark, make sure that model file is present in `/path/to/models`.

A Linux example could be:
```bash
#!/bin/bash
export DOCKER_IMAGE=intelanalytics/ipex-llm-inference-cpp-xpu:latest
export CONTAINER_NAME=ipex-llm-inference-cpp-xpu-container
sudo docker run -itd \
        --net=host \
        --device=/dev/dri \
        -v /path/to/models:/models \
        -e no_proxy=localhost,127.0.0.1 \
        --memory="32G" \
        --name=$CONTAINER_NAME \
        -e bench_model="mistral-7b-v0.1.Q4_0.gguf" \
        -e DEVICE=Arc \
        --shm-size="16g" \
        $DOCKER_IMAGE
```
A Windows example could be:
```bash
#!/bin/bash
export DOCKER_IMAGE=intelanalytics/ipex-llm-inference-cpp-xpu:latest
export CONTAINER_NAME=ipex-llm-inference-cpp-xpu-container
sudo docker run -itd \
        --net=host \
        --device=/dev/dri \
        --privileged \
        -v /path/to/models:/models \
        -v /usr/lib/wsl:/usr/lib/wsl \
        -e no_proxy=localhost,127.0.0.1 \
        --memory="32G" \
        --name=$CONTAINER_NAME \
        -e bench_model="mistral-7b-v0.1.Q4_0.gguf" \
        -e DEVICE=Arc \
        --shm-size="16g" \
        $DOCKER_IMAGE
```
After the container is booted, you can get into the container through `docker exec`.

```bash
docker exec -it ipex-llm-inference-cpp-xpu-container /bin/bash
```

To verify that the device is successfully mapped into the container, run `sycl-ls` to check the result. On a machine with an Arc A770, the sample output is:

```bash
root@arda-arc12:/# sycl-ls
[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device 1.2 [2023.16.7.0.21_160000]
[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i9-13900K 3.0 [2023.16.7.0.21_160000]
[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics 3.0 [23.17.26241.33]
[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26241]
```
### Quick benchmark for llama.cpp

Note that performance in Docker under Windows WSL is a little slower than on the Windows host; this is caused by the WSL kernel implementation.

```bash
bash /llm/scripts/benchmark_llama-cpp.sh

# benchmark results
llama_print_timings: load time = xxx ms
llama_print_timings: sample time = xxx ms / xxx runs ( xxx ms per token, xxx tokens per second)
llama_print_timings: prompt eval time = xxx ms / xxx tokens ( xxx ms per token, xxx tokens per second)
llama_print_timings: eval time = xxx ms / 128 runs ( xxx ms per token, xxx tokens per second)
llama_print_timings: total time = xxx ms / xxx tokens
```
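The script reads the GGUF filename from the `bench_model` environment variable set when the container was started. To benchmark a different file without recreating the container, you can override the variable for a single run; the filename below is only a placeholder:

```bash
# One-off override; the file must exist under /models inside the container.
bench_model="another-model.Q4_0.gguf" bash /llm/scripts/benchmark_llama-cpp.sh
```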
### Running llama.cpp inference with IPEX-LLM on Intel GPU

```bash
cd /llm/scripts/
# set the recommended Env
source ipex-llm-init --gpu --device $DEVICE
# mount models and change the model_path in `start-llama-cpp.sh`
bash start-llama-cpp.sh
```

Please refer to this [documentation](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/llama_cpp_quickstart.html) for more details.
### Running Ollama serving with IPEX-LLM on Intel GPU

Ollama runs in the background; you can find its log at `/llm/ollama/ollama.log`.

```bash
cd /llm/scripts/
# set the recommended Env
source ipex-llm-init --gpu --device $DEVICE
bash start-ollama.sh # ctrl+c to exit
```
#### Run Ollama models (interactive)

```bash
cd /llm/ollama

# create a file named Modelfile
cat > Modelfile <<'EOF'
FROM /models/mistral-7b-v0.1.Q4_0.gguf
TEMPLATE [INST] {{ .Prompt }} [/INST]
PARAMETER num_predict 64
EOF

# create example and run it on the console
./ollama create example -f Modelfile
./ollama run example
```
#### Pull models from ollama to serve

```bash
cd /llm/ollama
./ollama pull llama2
```

Use curl to test:
```bash
curl http://localhost:11434/api/generate -d '
{
   "model": "llama2",
   "prompt": "What is AI?",
   "stream": false
}'
```

Please refer to this [documentation](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/ollama_quickstart.html#pull-model) for more details.
### Running Open WebUI with Intel GPU

1. Start Ollama and load the model first, then use Open WebUI to chat. If you have difficulty accessing the Hugging Face repositories, you may use a mirror, e.g. add `export HF_ENDPOINT=https://hf-mirror.com`, and run the following script to start the Open WebUI Docker container.

```bash
export DOCKER_IMAGE=ghcr.io/open-webui/open-webui:main
export CONTAINER_NAME=<YOUR-DOCKER-CONTAINER-NAME>

docker rm -f $CONTAINER_NAME

docker run -itd \
            -v open-webui:/app/backend/data \
            -e PORT=8080 \
            --network=host \
            --name $CONTAINER_NAME \
            --restart always $DOCKER_IMAGE
```
2. Visit <http://localhost:8080> to use Open WebUI. The default Ollama serve address configured in Open WebUI is `http://localhost:11434`; you can change it under Connections at `http://localhost:8080/admin/settings` (a quick connectivity check is sketched below).

Sample output:

```bash
INFO: Started server process [1055]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)
```
<a href="https://llm-assets.readthedocs.io/en/latest/_images/open_webui_signup.png" target="_blank">
  <img src="https://llm-assets.readthedocs.io/en/latest/_images/open_webui_signup.png" width="100%" />
</a>

For how to log in and other guidance, please refer to this [documentation](../Quickstart/open_webui_with_ollama_quickstart.md) for more details.
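Before pointing Open WebUI at Ollama, it can be worth confirming that the Ollama endpoint is reachable from the host. A minimal sketch, assuming Ollama is serving on its default port 11434 as described above:

```bash
# List the models the Ollama server currently knows about.
# Any JSON response (even an empty model list) confirms the endpoint is up.
curl http://localhost:11434/api/tags
```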
ipex-llm-inference-cpp-xpu/benchmark_llama-cpp.sh (new file, 24 lines)
@@ -0,0 +1,24 @@
# init llama-cpp first
mkdir -p /llm/llama-cpp
cd /llm/llama-cpp
init-llama-cpp

# change the model_path to run
if [[ "$DEVICE" == "Arc" || "$DEVICE" == "ARC" ]]; then
    source ipex-llm-init -g --device Arc
elif [[ "$DEVICE" == "Flex" || "$DEVICE" == "FLEX" ]]; then
    source ipex-llm-init -g --device Flex
elif [[ "$DEVICE" == "Max" || "$DEVICE" == "MAX" ]]; then
    source ipex-llm-init -g --device Max
else
    echo "Invalid DEVICE specified."
fi
model="/models/"$bench_model

promt_1024_128="It is done, and submitted. You can play 'Survival of the Tastiest' on Android, and on the web. Playing on the web works, but you have to simulate multiple touch for table moving and that can be a bit confusing. There is a lot I'd like to talk about. I will go through every topic, insted of making the typical what went right/wrong list. Concept Working over the theme was probably one of the hardest tasks which I had to face. Originally, I had an idea of what kind of game I wanted to develop, gameplay wise - something with a lot of enemies/actors, simple graphics, maybe set in space, controlled from a top-down view. I was confident that I could fit any theme around it. In the end, the problem with a theme like 'Evolution' in a game is that evolution is unassisted. It happens through several seemingly random mutations over time, with the most apt permutation surviving. This genetic car simulator is, in my opinion, a great example of actual evolution of a species facing a challenge. But is it a game? In a game, you need to control something to reach an objective. That control goes against what evolution is supposed to be like. If you allow the user to pick how to evolve something, it's not evolution anymore - it's the equivalent of intelligent design, the fable invented by creationists to combat the idea of evolution. Being agnostic and a Pastafarian, that's not something that rubbed me the right way. Hence, my biggest dillema when deciding what to create was not with what I wanted to create, but with what I did not. I didn't want to create an 'intelligent design' simulator and wrongly call it evolution. This is a problem, of course, every other contestant also had to face. And judging by the entries submitted, not many managed to work around it. I'd say the only real solution was through the use of artificial selection, somehow. So far, I haven't seen any entry using this at its core gameplay. Alas, this is just a fun competition and after a while I decided not to be as strict with the game idea, and allowed myself to pick whatever I thought would work out. My initial idea was to create something where humanity tried to evolve to a next level, but had some kind of foe trying to stop them from doing so. I kind of had this image of human souls flying in space towards a monolith or a space baby (all based in 2001: A Space Odyssey of course) but I couldn't think of compelling (read: serious) mechanics for that. Borgs were my next inspiration, as their whole hypothesis fit pretty well into the evolution theme. But how to make it work? Are you the borg, or fighting the Borg? The third and final idea came to me through my girlfriend, who somehow gave me the idea of making something about the evolution of Pasta. The more I thought about it the more it sounded like it would work, so I decided to go with it. Conversations with my inspiring co-worker Roushey (who also created the 'Mechanical Underdogs' signature logo for my intros) further matured the concept, as it involved into the idea of having individual pieces of pasta flying around and trying to evolve until they became all-powerful. A secondary idea here was that the game would work to explain how the Flying Spaghetti Monster came to exist - by evolving from a normal dinner table. So the idea evolved more or less into this: you are sitting a table. You have your own plate, with is your 'base'. There are 5 other guests at the table, each with their own plate. Your plate can spawn little pieces of pasta. 
You do so by 'ordering' them through a menu. Some pastas are better than others; some are faster, some are stronger. They have varying 'costs', which are debited from your credits (you start with a number of credits). Once spawned, your pastas start flying around. Their instinct is to fly to other plates, in order to conquer them (the objective of the game is having your pasta conquer all the plates on the table). But they are really autonomous, so after being spawned, you have no control over your pasta (think DotA or LoL creeps). Your pasta doesn't like other people's pasta, so if they meet, they shoot sauce at each other until one dies. You get credits for other pastas your own pasta kill. Once a pasta is in vicinity of a plate"

# warm-up two times
./llama-cli -m $model -n 128 --prompt "${promt_1024_128}" -t 8 -e -ngl 999 --color --ctx-size 1024 --no-mmap --temp 0
./llama-cli -m $model -n 128 --prompt "${promt_1024_128}" -t 8 -e -ngl 999 --color --ctx-size 1024 --no-mmap --temp 0

./llama-cli -m $model -n 128 --prompt "${promt_1024_128}" -t 8 -e -ngl 999 --color --ctx-size 1024 --no-mmap --temp 0
ipex-llm-inference-cpp-xpu/start-llama-cpp.sh (new file, 8 lines)
@@ -0,0 +1,8 @@
# init llama-cpp first
mkdir -p /llm/llama-cpp
cd /llm/llama-cpp
init-llama-cpp

# change the model_path to run
model="/models/mistral-7b-v0.1.Q4_0.gguf"
./llama-cli -m $model -n 32 --prompt "What is AI?" -t 8 -e -ngl 999 --color
ipex-llm-inference-cpp-xpu/start-ollama.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
# init ollama first
mkdir -p /llm/ollama
cd /llm/ollama
init-ollama
export OLLAMA_NUM_GPU=999
export ZES_ENABLE_SYSMAN=1

# start ollama service
(./ollama serve > ollama.log) &
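Once the service is up, two quick sanity checks from inside the container, reusing the version command shown in the README hunk above and the log file this script writes:

```bash
# Print the Ollama version to confirm the binary starts.
/llm/ollama/ollama -v

# Follow the serve log written by start-ollama.sh (Ctrl+C to stop).
tail -f /llm/ollama/ollama.log
```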
ipex-llm-inference-cpp-xpu/start-open-webui.sh (new file, 2 lines)
@@ -0,0 +1,2 @@
# start the Open WebUI backend, logging to open-webui.log
cd /llm/open-webui/backend
bash start.sh > open-webui.log