Skip to content

Commit 8d64a7a

Browse files
committed
Merge branch 'main' into support-qwen3-omni
2 parents a62dac2 + b20464d commit 8d64a7a

File tree

62 files changed

+1050
-1500
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

62 files changed

+1050
-1500
lines changed

.github/workflows/api_eval.yml

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -32,19 +32,20 @@ on:
3232
description: 'Set custom run ID. If not provided, github.run_id will be used'
3333
type: string
3434
default: ''
35-
35+
offline_mode:
36+
required: true
37+
description: 'Whether to start in offline mode; if true, you should prepare the code and whl package yourself'
38+
type: boolean
39+
default: false
3640

3741
env:
3842
HOST_PIP_CACHE_DIR: /nvme/github-actions/pip-cache
3943
HOST_LOCALTIME: /usr/share/zoneinfo/Asia/Shanghai
4044
ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
4145
REPORT_DIR: /nvme/qa_test_models/evaluation_report/allure_report/${{ inputs.repo_ref }}_${{ github.run_id }}
4246
COV_PARAM: --cov /opt/py3/lib/python3.10/site-packages/lmdeploy
43-
FAIL_CONFIG: '--lf'
4447
TEST_CODE_PATH: /nvme/qa_test_models/test_pkg/lmdeploy/${{ inputs.repo_ref }}_${{ github.run_id }}
4548
OFFLINE_CODE_PATH: /nvme/qa_test_models/offline_pkg/lmdeploy
46-
OFFLINE_REQUIREMENTS: /nvme/qa_test_models/offline_pkg/requirements.txt
47-
DEEPSEEK_VL: /nvme/qa_test_models/offline_pkg/DeepSeek-VL
4849
COMPASS_DATA_CACHE: /nvme/qa_test_models/compass_data_cache
4950
HF_DATASETS_OFFLINE: 1
5051
HF_DATASETS_CACHE: /nvme/qa_test_models/hf_datasets
@@ -54,7 +55,7 @@ env:
5455

5556
jobs:
5657
linux-build:
57-
if: ${{ !cancelled() }}
58+
if: ${{github.event_name == 'schedule' || (!cancelled() && !inputs.offline_mode)}}
5859
strategy:
5960
matrix:
6061
pyver: [py310]
@@ -154,6 +155,9 @@ jobs:
154155
matrix:
155156
backend: ${{ fromJSON(inputs.backend || '["turbomind", "pytorch"]')}}
156157
gpu_num: ['gpu_num_1', 'gpu_num_2', 'gpu_num_4', 'gpu_num_8']
158+
transformers: ["", "legacy"]
159+
env:
160+
TEST_ENV: ${{ matrix.transformers }}
157161
container:
158162
image: openmmlab/lmdeploy:latest-cu12.8
159163
options: "--gpus=all --ipc=host --user root -e PIP_CACHE_DIR=/root/.cache/pip -e NVIDIA_DISABLE_REQUIRE=1 --pull never"
@@ -186,6 +190,10 @@ jobs:
186190
cd opencompass
187191
python3 -m pip install .
188192
python3 -m pip install langdetect
193+
- name: Downgrade transformers
194+
if: ${{matrix.transformers == 'legacy'}}
195+
run: |
196+
pip install transformers==4.57.6
189197
- name: Check env
190198
run: |
191199
python3 -m pip list
@@ -211,5 +219,6 @@ jobs:
211219
if: always()
212220
run: |
213221
echo "status=done" >> ${{env.REPORT_DIR}}/status.txt
222+
chmod -R 777 ${{env.REPORT_DIR}}
214223
export workdir=$(pwd)
215224
rm -rf $workdir/*

.github/workflows/api_eval_h800.yml

Lines changed: 0 additions & 169 deletions
This file was deleted.

.github/workflows/benchmark.yml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,7 @@ jobs:
139139
matrix:
140140
benchmark_type: ${{fromJSON(github.event.inputs.benchmark_type)}}
141141
gpu_num: ['gpu_num_1', 'gpu_num_2', 'gpu_num_4', 'gpu_num_8']
142+
transformers: ["", "legacy"]
142143
include:
143144
- n: 8
144145
gpu_num: gpu_num_1
@@ -148,6 +149,8 @@ jobs:
148149
gpu_num: gpu_num_4
149150
- n: 1
150151
gpu_num: gpu_num_8
152+
env:
153+
TEST_ENV: ${{ matrix.transformers }}
151154
timeout-minutes: 480
152155
container:
153156
image: openmmlab/lmdeploy:latest-cu12.8
@@ -174,6 +177,10 @@ jobs:
174177
run: |
175178
python3 -m pip uninstall lmdeploy -y && python3 -m pip install lmdeploy-*.whl --no-deps
176179
python3 -m pip install -r requirements/test.txt
180+
- name: Downgrade transformers
181+
if: ${{matrix.transformers == 'legacy'}}
182+
run: |
183+
pip install transformers==4.57.6
177184
- name: Check env
178185
run: |
179186
python3 -m pip list
File renamed without changes.

0 commit comments

Comments (0)