diff --git a/docs/get_started/installation/kunlunxin_xpu.md b/docs/get_started/installation/kunlunxin_xpu.md
index 7c506973566..9056585b268 100644
--- a/docs/get_started/installation/kunlunxin_xpu.md
+++ b/docs/get_started/installation/kunlunxin_xpu.md
@@ -28,9 +28,9 @@ Verified platform:
```bash
mkdir Work
cd Work
-docker pull ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.4.0
+docker pull ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.5.0
docker run --name fastdeploy-xpu --net=host -itd --privileged -v $PWD:/Work -w /Work \
- ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.4.0 \
+ ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.5.0 \
/bin/bash
docker exec -it fastdeploy-xpu /bin/bash
```
@@ -40,7 +40,7 @@ docker exec -it fastdeploy-xpu /bin/bash
### Install PaddlePaddle
```bash
-python -m pip install paddlepaddle-xpu==3.3.0 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
+python -m pip install paddlepaddle-xpu==3.3.1 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
```
Alternatively, you can install the latest version of PaddlePaddle (Not recommended)
@@ -52,7 +52,7 @@ python -m pip install --pre paddlepaddle-xpu -i https://www.paddlepaddle.org.cn/
### Install FastDeploy (**Do NOT install via PyPI source**)
```bash
-python -m pip install fastdeploy-xpu==2.4.0 -i https://www.paddlepaddle.org.cn/packages/stable/fastdeploy-xpu-p800/ --extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
+python -m pip install fastdeploy-xpu==2.5.0 -i https://www.paddlepaddle.org.cn/packages/stable/fastdeploy-xpu-p800/ --extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```
Alternatively, you can install the latest version of FastDeploy (Not recommended)
@@ -66,7 +66,7 @@ python -m pip install --pre fastdeploy-xpu -i https://www.paddlepaddle.org.cn/pa
### Install PaddlePaddle
```bash
-python -m pip install paddlepaddle-xpu==3.3.0 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
+python -m pip install paddlepaddle-xpu==3.3.1 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
```
Alternatively, you can install the latest version of PaddlePaddle (Not recommended)
diff --git a/docs/usage/kunlunxin_xpu_deployment.md b/docs/usage/kunlunxin_xpu_deployment.md
index 0d10e003591..1f1936838c8 100644
--- a/docs/usage/kunlunxin_xpu_deployment.md
+++ b/docs/usage/kunlunxin_xpu_deployment.md
@@ -1,27 +1,542 @@
[简体中文](../zh/usage/kunlunxin_xpu_deployment.md)
## Supported Models
-|Model Name|Context Length|Quantization|XPUs Required|Deployment Commands|Applicable Version|
-|-|-|-|-|-|-|
-|ERNIE-4.5-300B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B|32K|WINT4|4 (Recommended)|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B|32K|WINT4|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.95|2.4.0|
-|ERNIE-4.5-300B-A47B|128K|WINT4|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 131072 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|32K|BF16|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9 |2.4.0|
-|ERNIE-4.5-21B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|32K|WINT4|1 (Recommended)|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|128K|BF16|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|128K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9 |2.4.0|
-|ERNIE-4.5-21B-A3B|128K|WINT4|1 (Recommended)|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|32K|BF16|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|32K|WINT8|1 (Recommended)|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|128K|BF16|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|128K|WINT8|1 (Recommended)|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "W4A8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl|2.4.0|
-|ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 8 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--gpu-memory-utilization 0.7|2.4.0|
-|PaddleOCR-VL-0.9B|32K|BF16|1|export FD_ENABLE_MAX_PREFILL=1
export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256|2.4.0|
-|ERNIE-4.5-VL-28B-A3B-Thinking|128K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Thinking \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 131072 \
--max-num-seqs 32 \
--engine-worker-queue-port 8189 \
--metrics-port 8190 \
--cache-queue-port 8191 \
--reasoning-parser ernie-45-vl-thinking \
--tool-call-parser ernie-45-vl-thinking \
--mm-processor-kwargs '{"image_max_pixels": 12845056 }'|2.4.0|
+
+| Model Name | Context Length | Quantization | XPUs Required | Applicable Version |
+|------------|---------------|--------------|---------------|-------------------|
+| ERNIE-4.5-300B-A47B | 32K | WINT8 | 8 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 32K | WINT4 | 4 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 32K | WINT4 | 8 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 128K | WINT4 | 8 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | WINT4 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | WINT4 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 128K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 128K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-300B-A47B-W4A8C8-TP4 | 32K | W4A8 | 4 | 2.5.0 |
+| ERNIE-4.5-VL-28B-A3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-VL-424B-A47B | 32K | WINT8 | 8 | 2.5.0 |
+| PaddleOCR-VL-0.9B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-VL-28B-A3B-Thinking | 128K | WINT8 | 1 | 2.5.0 |
+
+
+ERNIE-4.5-300B-A47B (32K, WINT8, 8 XPUs) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # Consistent with your network card name
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+    --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (32K, WINT4, 4 XPUs) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # or "4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # or "4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # Consistent with your network card name
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization wint4 \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+    --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (32K, WINT4, 8 XPUs) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.95
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # Consistent with your network card name
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+    --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization wint4 \
+ --gpu-memory-utilization 0.95 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+    --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (128K, WINT4, 8 XPUs) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 131072 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # Consistent with your network card name
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+    --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+    --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+    --max-model-len 131072 \
+ --max-num-seqs 64 \
+ --quantization wint4 \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+    --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, BF16, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, WINT8, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, WINT4, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, BF16, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, WINT8, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, WINT4, 1 XPU) - Click to view deployment commands
+
+**Quick Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**Best Deployment:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-0.3B (32K, BF16, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (32K, WINT8, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (128K, BF16, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (128K, WINT8, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-300B-A47B-W4A8C8-TP4 (32K, W4A8, 4 XPUs) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # or "4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "W4A8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-VL-28B-A3B (32K, WINT8, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --quantization "wint8" \
+ --max-model-len 32768 \
+ --max-num-seqs 10 \
+ --enable-mm \
+ --mm-processor-kwargs '{"video_max_frames": 30}' \
+ --limit-mm-per-prompt '{"image": 10, "video": 3}' \
+ --reasoning-parser ernie-45-vl
+```
+
+
+
+ERNIE-4.5-VL-424B-A47B (32K, WINT8, 8 XPUs) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --quantization "wint8" \
+ --max-model-len 32768 \
+ --max-num-seqs 8 \
+ --enable-mm \
+ --mm-processor-kwargs '{"video_max_frames": 30}' \
+ --limit-mm-per-prompt '{"image": 10, "video": 3}' \
+ --reasoning-parser ernie-45-vl \
+ --gpu-memory-utilization 0.7
+```
+
+
+
+PaddleOCR-VL-0.9B (32K, BF16, 1 XPU) - Click to view deployment commands
+
+```bash
+export FD_ENABLE_MAX_PREFILL=1
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/PaddleOCR-VL \
+ --port 8188 \
+ --metrics-port 8181 \
+ --engine-worker-queue-port 8182 \
+ --max-model-len 16384 \
+ --max-num-batched-tokens 16384 \
+ --gpu-memory-utilization 0.8 \
+ --max-num-seqs 256
+```
+
+
+
+ERNIE-4.5-VL-28B-A3B-Thinking (128K, WINT8, 1 XPU) - Click to view deployment commands
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # Specify any card
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Thinking \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --quantization "wint8" \
+ --max-model-len 131072 \
+ --max-num-seqs 32 \
+ --engine-worker-queue-port 8189 \
+ --metrics-port 8190 \
+ --cache-queue-port 8191 \
+ --reasoning-parser ernie-45-vl-thinking \
+ --tool-call-parser ernie-45-vl-thinking \
+ --mm-processor-kwargs '{"image_max_pixels": 12845056}'
+```
+
## Quick start
@@ -40,8 +555,7 @@ python -m fastdeploy.entrypoints.openai.api_server \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint4" \
- --gpu-memory-utilization 0.9 \
- --load-choices "default"
+ --gpu-memory-utilization 0.9
```
**Note:** When deploying on 4 XPUs, only two configurations are supported which constrained by hardware limitations such as interconnect capabilities.
@@ -113,8 +627,7 @@ python -m fastdeploy.entrypoints.openai.api_server \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
- --reasoning-parser ernie-45-vl \
- --load-choices "default"
+ --reasoning-parser ernie-45-vl
```
#### Send requests
@@ -258,8 +771,7 @@ python -m fastdeploy.entrypoints.openai.api_server \
--cache-queue-port 8191 \
--reasoning-parser ernie-45-vl-thinking \
--tool-call-parser ernie-45-vl-thinking \
- --mm-processor-kwargs '{"image_max_pixels": 12845056 }' \
- --load-choices "default_v1"
+ --mm-processor-kwargs '{"image_max_pixels": 12845056 }'
```
#### Send requests
diff --git a/docs/zh/get_started/installation/kunlunxin_xpu.md b/docs/zh/get_started/installation/kunlunxin_xpu.md
index b0e7f2a64f3..d3ec92a9d56 100644
--- a/docs/zh/get_started/installation/kunlunxin_xpu.md
+++ b/docs/zh/get_started/installation/kunlunxin_xpu.md
@@ -28,9 +28,9 @@
```bash
mkdir Work
cd Work
-docker pull ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.4.0
+docker pull ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.5.0
docker run --name fastdeploy-xpu --net=host -itd --privileged -v $PWD:/Work -w /Work \
- ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.4.0 \
+ ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/fastdeploy-xpu:2.5.0 \
/bin/bash
docker exec -it fastdeploy-xpu /bin/bash
```
@@ -40,7 +40,7 @@ docker exec -it fastdeploy-xpu /bin/bash
### 安装 PaddlePaddle
```bash
-python -m pip install paddlepaddle-xpu==3.3.0 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
+python -m pip install paddlepaddle-xpu==3.3.1 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
```
或者您也可以安装最新版 PaddlePaddle(不推荐)
@@ -52,7 +52,7 @@ python -m pip install --pre paddlepaddle-xpu -i https://www.paddlepaddle.org.cn/
### 安装 FastDeploy(**注意不要通过 pypi 源安装**)
```bash
-python -m pip install fastdeploy-xpu==2.4.0 -i https://www.paddlepaddle.org.cn/packages/stable/fastdeploy-xpu-p800/ --extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
+python -m pip install fastdeploy-xpu==2.5.0 -i https://www.paddlepaddle.org.cn/packages/stable/fastdeploy-xpu-p800/ --extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```
或者你也可以安装最新版 FastDeploy(不推荐)
@@ -66,7 +66,7 @@ python -m pip install --pre fastdeploy-xpu -i https://www.paddlepaddle.org.cn/pa
### 安装 PaddlePaddle
```bash
-python -m pip install paddlepaddle-xpu==3.3.0 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
+python -m pip install paddlepaddle-xpu==3.3.1 -i https://www.paddlepaddle.org.cn/packages/stable/xpu-p800/
```
或者您也可以安装最新版 PaddlePaddle(不推荐)
diff --git a/docs/zh/usage/kunlunxin_xpu_deployment.md b/docs/zh/usage/kunlunxin_xpu_deployment.md
index ec7d12135db..17df4ef7919 100644
--- a/docs/zh/usage/kunlunxin_xpu_deployment.md
+++ b/docs/zh/usage/kunlunxin_xpu_deployment.md
@@ -1,27 +1,542 @@
[English](../../usage/kunlunxin_xpu_deployment.md)
## 支持的模型
-|模型名|上下文长度|量化|所需卡数|部署命令|适用版本|
-|-|-|-|-|-|-|
-|ERNIE-4.5-300B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B|32K|WINT4|4 (推荐)|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B|32K|WINT4|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.95|2.4.0|
-|ERNIE-4.5-300B-A47B|128K|WINT4|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--max-model-len 131072 \
--max-num-seqs 64 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|32K|BF16|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|32K|WINT4|1 (推荐)|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|128K|BF16|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|128K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-21B-A3B|128K|WINT4|1 (推荐)|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint4" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|32K|BF16|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|32K|WINT8|1 (推荐)|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 32768 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|128K|BF16|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-0.3B|128K|WINT8|1 (推荐)|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--max-model-len 131072 \
--max-num-seqs 128 \
--quantization "wint8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "W4A8" \
--gpu-memory-utilization 0.9|2.4.0|
-|ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0"# 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl|2.4.0|
-|ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 8 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--gpu-memory-utilization 0.7|2.4.0|
-|PaddleOCR-VL-0.9B|32K|BF16|1|export FD_ENABLE_MAX_PREFILL=1
export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256|2.4.0|
-|ERNIE-4.5-VL-28B-A3B-Thinking|128K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Thinking \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 131072 \
--max-num-seqs 32 \
--engine-worker-queue-port 8189 \
--metrics-port 8190 \
--cache-queue-port 8191 \
--reasoning-parser ernie-45-vl-thinking \
--tool-call-parser ernie-45-vl-thinking \
--mm-processor-kwargs '{"image_max_pixels": 12845056 }'|2.4.0|
+
+| 模型名称 | 上下文长度 | 量化 | 所需卡数 | 适用版本 |
+|----------|-----------|------|---------|---------|
+| ERNIE-4.5-300B-A47B | 32K | WINT8 | 8 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 32K | WINT4 | 4 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 32K | WINT4 | 8 | 2.5.0 |
+| ERNIE-4.5-300B-A47B | 128K | WINT4 | 8 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 32K | WINT4 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-21B-A3B | 128K | WINT4 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 128K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-0.3B | 128K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-300B-A47B-W4A8C8-TP4 | 32K | W4A8 | 4 | 2.5.0 |
+| ERNIE-4.5-VL-28B-A3B | 32K | WINT8 | 1 | 2.5.0 |
+| ERNIE-4.5-VL-424B-A47B | 32K | WINT8 | 8 | 2.5.0 |
+| PaddleOCR-VL-0.9B | 32K | BF16 | 1 | 2.5.0 |
+| ERNIE-4.5-VL-28B-A3B-Thinking | 128K | WINT8 | 1 | 2.5.0 |
+
+
+ERNIE-4.5-300B-A47B (32K, WINT8, 8 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # 与线上网卡名一致
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+  --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (32K, WINT4, 4 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # 或 "4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # 或 "4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # 与线上网卡名一致
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+  --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+  --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (32K, WINT4, 8 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.95
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # 与线上网卡名一致
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+  --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+  --quantization "wint4" \
+ --gpu-memory-utilization 0.95 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+  --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-300B-A47B (128K, WINT4, 8 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --max-model-len 131072 \
+ --max-num-seqs 64 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+export BKCL_ENABLE_XDR=1
+export BKCL_RDMA_NICS=eth1,eth2,eth3,eth4 # 与线上网卡名一致
+export BKCL_TRACE_TOPO=1
+export BKCL_PCIE_RING=1
+export XSHMEM_MODE=1
+export XSHMEM_QP_NUM_PER_RANK=32
+export BKCL_RDMA_VERBS=1
+python -m fastdeploy.entrypoints.openai.api_server \
+  --model PaddlePaddle/ERNIE-4.5-300B-A47B-Paddle \
+  --port 8188 \
+ --engine-worker-queue-port 8124 \
+ --metrics-port 8125 \
+ --cache-queue-port 55996 \
+ --tensor-parallel-size 8 \
+  --max-model-len 131072 \
+ --max-num-seqs 64 \
+  --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --enable-expert-parallel \
+ --enable-prefix-caching \
+ --data-parallel-size 1 \
+  --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, BF16, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, WINT8, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (32K, WINT4, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, BF16, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, WINT8, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-21B-A3B (128K, WINT4, 1 卡) - 点击查看部署命令
+
+**快速部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9
+```
+
+**最优部署:**
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-21B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint4" \
+ --gpu-memory-utilization 0.9 \
+ --speculative-config '{"method": "mtp", "num_speculative_tokens": 1, "model": "'${mtp_model_path}'"}'
+```
+
+
+
+ERNIE-4.5-0.3B (32K, BF16, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (32K, WINT8, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 32768 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (128K, BF16, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-0.3B (128K, WINT8, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-0.3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --max-model-len 131072 \
+ --max-num-seqs 128 \
+ --quantization "wint8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-300B-A47B-W4A8C8-TP4 (32K, W4A8, 4 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3" # 或 "4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 4 \
+ --max-model-len 32768 \
+ --max-num-seqs 64 \
+ --quantization "W4A8" \
+ --gpu-memory-utilization 0.9
+```
+
+
+
+ERNIE-4.5-VL-28B-A3B (32K, WINT8, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --quantization "wint8" \
+ --max-model-len 32768 \
+ --max-num-seqs 10 \
+ --enable-mm \
+ --mm-processor-kwargs '{"video_max_frames": 30}' \
+ --limit-mm-per-prompt '{"image": 10, "video": 3}' \
+ --reasoning-parser ernie-45-vl
+```
+
+
+
+ERNIE-4.5-VL-424B-A47B (32K, WINT8, 8 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
+ --port 8188 \
+ --tensor-parallel-size 8 \
+ --quantization "wint8" \
+ --max-model-len 32768 \
+ --max-num-seqs 8 \
+ --enable-mm \
+ --mm-processor-kwargs '{"video_max_frames": 30}' \
+ --limit-mm-per-prompt '{"image": 10, "video": 3}' \
+ --reasoning-parser ernie-45-vl \
+ --gpu-memory-utilization 0.7
+```
+
+
+
+PaddleOCR-VL-0.9B (32K, BF16, 1 卡) - 点击查看部署命令
+
+```bash
+export FD_ENABLE_MAX_PREFILL=1
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/PaddleOCR-VL \
+ --port 8188 \
+ --metrics-port 8181 \
+ --engine-worker-queue-port 8182 \
+ --max-model-len 16384 \
+ --max-num-batched-tokens 16384 \
+ --gpu-memory-utilization 0.8 \
+ --max-num-seqs 256
+```
+
+
+
+ERNIE-4.5-VL-28B-A3B-Thinking (128K, WINT8, 1 卡) - 点击查看部署命令
+
+```bash
+export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
+python -m fastdeploy.entrypoints.openai.api_server \
+ --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Thinking \
+ --port 8188 \
+ --tensor-parallel-size 1 \
+ --quantization "wint8" \
+ --max-model-len 131072 \
+ --max-num-seqs 32 \
+ --engine-worker-queue-port 8189 \
+ --metrics-port 8190 \
+ --cache-queue-port 8191 \
+ --reasoning-parser ernie-45-vl-thinking \
+ --tool-call-parser ernie-45-vl-thinking \
+ --mm-processor-kwargs '{"image_max_pixels": 12845056}'
+```
+
## 快速开始