Skip to content

Commit 491d526

Browse files
authored
[Fix] Add CI and Test
add CI add pytest add pre commit config
2 parents 8d2f127 + b6fe9be commit 491d526

11 files changed

Lines changed: 324 additions & 55 deletions

File tree

.github/workflows/ci.yml

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
name: PR CI
2+
3+
on:
4+
pull_request:
5+
types: [opened, synchronize, reopened, ready_for_review]
6+
branches: [ main, master, develop ]
7+
paths-ignore:
8+
- '**.md'
9+
- 'docs/**'
10+
11+
concurrency:
12+
group: pr-${{ github.event.pull_request.number }}-${{ github.workflow }}
13+
cancel-in-progress: true
14+
15+
permissions:
16+
contents: read
17+
18+
jobs:
19+
test:
20+
# Only needed if you run exclusively on GitHub-hosted runners:
21+
runs-on: ubuntu-latest
22+
if: ${{ github.event.pull_request.draft == false }} # 草稿 PR 不跑
23+
24+
strategy:
25+
fail-fast: false
26+
matrix:
27+
python-version: ['3.9', '3.11']
28+
os: [ubuntu-latest]
29+
30+
steps:
31+
- name: Checkout
32+
uses: actions/checkout@v4
33+
34+
- name: Set up Python ${{ matrix.python-version }}
35+
uses: actions/setup-python@v5
36+
with:
37+
python-version: ${{ matrix.python-version }}
38+
39+
- name: Cache pip
40+
uses: actions/cache@v4
41+
with:
42+
path: ~/.cache/pip
43+
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements*.txt') }}
44+
restore-keys: |
45+
${{ runner.os }}-pip-${{ matrix.python-version }}-
46+
47+
- name: Install deps
48+
run: |
49+
python -m pip install --upgrade pip
50+
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
51+
if [ -f requirements-dev.txt ]; then pip install -r requirements-dev.txt; fi
52+
53+
- name: Lint (flake8 + isort + black --check)
54+
run: |
55+
pip install flake8 isort black
56+
flake8 .
57+
isort --check-only --diff .
58+
black --check .
59+
60+
- name: Run tests
61+
run: |
62+
pip install pytest
63+
pytest -q

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -148,4 +148,5 @@ logs/
148148
*.png
149149
*.ckpt
150150
/results/
151-
checkpoints
151+
checkpoints
152+
internnav/model/basemodel/LongCLIP/

.gitmodules

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,4 @@
55
[submodule "internnav/model/basemodel/LongCLIP"]
66
path = internnav/model/basemodel/LongCLIP
77
url = https://github.com/beichenzbc/Long-CLIP
8+
ignore = untracked

.pre-commit-config.yaml

Lines changed: 4 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -26,69 +26,19 @@ repos:
2626
rev: v2.2.1
2727
hooks:
2828
- id: codespell
29-
exclude: |
30-
(?x)(
31-
^toolkits/grscenes_scripts/README.md|
32-
^toolkits/indoor_scenes_generation/infinigen/infinigen_examples/constraints
33-
)
34-
# - repo: https://github.com/gitleaks/gitleaks
35-
# rev: v8.24.0
36-
# hooks:
37-
# - id: gitleaks
29+
- repo: https://github.com/gitleaks/gitleaks
30+
rev: v8.24.0
31+
hooks:
32+
- id: gitleaks
3833
- repo: https://github.com/pre-commit/pre-commit-hooks
3934
rev: v3.1.0
4035
hooks:
4136
- id: trailing-whitespace
4237
- id: check-yaml
4338
- id: end-of-file-fixer
44-
exclude: '^(.*/lcmtypes/.*)'
4539
- id: requirements-txt-fixer
46-
- id: double-quote-string-fixer
47-
exclude: '^(.*/lcmtypes/.*)'
4840
- id: check-merge-conflict
4941
- id: fix-encoding-pragma
5042
args: ["--remove"]
5143
- id: mixed-line-ending
5244
args: ["--fix=lf"]
53-
54-
# - repo: https://github.com/PyCQA/isort
55-
# rev: 5.11.5
56-
# hooks:
57-
# - id: isort
58-
# - repo: https://github.com/psf/black
59-
# rev: 22.10.0
60-
# hooks:
61-
# - id: black
62-
# args: [--line-length=79]
63-
# - repo: https://github.com/PyCQA/flake8
64-
# rev: 4.0.1
65-
# hooks:
66-
# - id: flake8
67-
# - repo: https://github.com/codespell-project/codespell
68-
# rev: v2.2.1
69-
# hooks:
70-
# - id: codespell
71-
# exclude: |
72-
# (?x)(
73-
# ^toolkits/grscenes_scripts/README.md|
74-
# ^toolkits/indoor_scenes_generation/infinigen/infinigen_examples/constraints
75-
# )
76-
# - repo: https://github.com/gitleaks/gitleaks
77-
# rev: v8.24.0
78-
# hooks:
79-
# - id: gitleaks
80-
# - repo: https://github.com/pre-commit/pre-commit-hooks
81-
# rev: v3.1.0
82-
# hooks:
83-
# - id: trailing-whitespace
84-
# - id: check-yaml
85-
# - id: end-of-file-fixer
86-
# exclude: '^(.*/lcmtypes/.*)'
87-
# - id: requirements-txt-fixer
88-
# - id: double-quote-string-fixer
89-
# exclude: '^(.*/lcmtypes/.*)'
90-
# - id: check-merge-conflict
91-
# - id: fix-encoding-pragma
92-
# args: ["--remove"]
93-
# - id: mixed-line-ending
94-
# args: ["--fix=lf"]

pyproject.toml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,3 +8,14 @@ lcmtypes
88
[tool.isort]
99
profile = "black"
1010
skip_glob = '**/lcmtypes/**'
11+
12+
[tool.pytest.ini_options]
13+
# Restrict test discovery to tests/ (avoids scanning script directories, etc.)
14+
testpaths = [
15+
"tests"
16+
]
17+
addopts = "-ra --color=yes --maxfail=1"
18+
markers = [
19+
"slow: marks tests as slow",
20+
"gpu: requires GPU"
21+
]

requirements/test.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
pytest==7.3.1
2+
pytest-cov==4.0.0

tests/conftest.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
import pytest
2+
3+
4+
@pytest.fixture
def tmp_cfg(tmp_path):
    """Write a minimal one-key YAML config into a temp dir and return its path."""
    cfg_path = tmp_path / "config.yaml"
    cfg_path.write_text("hello: world\n")
    return cfg_path
9+
10+
11+
# Global per-test hook: automatically skip tests marked `gpu` when CUDA
# (or torch itself) is unavailable.
def pytest_runtest_setup(item):
    if "gpu" not in item.keywords:
        return
    try:
        import torch

        cuda_ok = torch.cuda.is_available()
    except Exception:
        pytest.skip("Torch not available")
    if not cuda_ok:
        pytest.skip("No CUDA for gpu-marked test")

tests/function_test/e2e_test.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
import json
2+
import os
3+
import subprocess
4+
import sys
5+
6+
import pytest
7+
8+
9+
def common_body(cmd_line):
    """Run *cmd_line* through the shell, streaming output, and assert exit code 0.

    NOTE(review): shell=True with a string command — fine for these fixed test
    commands, but do not pass untrusted input here.
    """
    proc = subprocess.run(
        cmd_line,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=sys.stdout,
        stderr=sys.stderr,
        universal_newlines=True,
        close_fds=True,
        bufsize=1,
    )
    assert proc.returncode == 0, f'real exit code is {proc.returncode}'
22+
23+
24+
def update_jsonl_from_json(json_file_path, jsonl_file_path, update_item):
    """Append the contents of a JSON file to a JSONL file, merged with *update_item*.

    Each dict record is merged as ``{**update_item, **record}`` — keys already
    present in the record win over *update_item*.

    Fix: the original merged before the list check, so a JSON file whose top
    level is a list raised TypeError on ``{**update_item, **data}``. Normalize
    to a list first and merge per item (non-dict items are written unchanged).
    """
    with open(json_file_path, 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    if not isinstance(data, list):
        data = [data]
    with open(jsonl_file_path, 'a', encoding='utf-8') as jsonl_file:
        for item in data:
            if isinstance(item, dict):
                item = {**update_item, **item}
            json_line = json.dumps(item, ensure_ascii=False)
            jsonl_file.write(json_line + '\n')
34+
35+
36+
def teardown_function(function):
    """Pytest per-test teardown: fold ./test_result.json into ../total_result.jsonl.

    Tags each record with ``case_info = <test name>_<JOB_ID>``.

    Fix: ``os.environ.get('JOB_ID')`` returns None when the variable is unset,
    which made the string concatenation raise TypeError; default to ''.
    """
    if os.path.exists('./test_result.json'):
        job_id = os.environ.get('JOB_ID', '')
        case_info = {'case_info': function.__name__ + '_' + job_id}
        update_jsonl_from_json('./test_result.json', '../total_result.jsonl', case_info)
    else:
        print('Warning! There is no test_result.json')
44+
45+
46+
"""""" """""" """""" """""" """""" """""" """""" """
47+
Test
48+
""" """""" """""" """""" """""" """""" """""" """"""
49+
50+
51+
@pytest.mark.gpu
def test_evaluator():
    """End-to-end: run the evaluator function test in a subprocess and require exit 0."""
    common_body('python ./tests/function_test/test_evaluator.py')
Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
'''
2+
Test the evaluator eval logic without model involve.
3+
The main progress:
4+
Init => warm up => fake one action
5+
'''
6+
7+
8+
def main():
    """Build a full EvalCfg for the evaluator test and print it.

    Exercises the evaluator configuration without involving a model. The
    intended flow (init => warm up => fake one action) is currently disabled —
    see the commented-out section below, kept as scaffolding for when the
    Evaluator can run in CI.
    """
    from enum import Enum

    # Project-local config types; only constructed and printed here.
    from internnav.configs.agent import AgentCfg
    from internnav.configs.evaluator import (
        EnvCfg,
        EvalCfg,
        EvalDatasetCfg,
        SceneCfg,
        TaskCfg,
    )

    # NOTE(review): presumably mirrors the evaluator's internal runner status
    # codes (values look deliberate: NOT_RESET=3, TERMINATED=2) — confirm they
    # match the evaluator's enum. Only used by the commented-out code below.
    class runner_status_code(Enum):
        NORMAL = 0
        WARM_UP = 1
        NOT_RESET = 3
        TERMINATED = 2
        STOP = 4

    eval_cfg = EvalCfg(
        agent=AgentCfg(
            server_port=8087,
            model_name='rdp',
            ckpt_path='checkpoints/r2r/fine_tuned/rdp',
            model_settings={},
        ),
        env=EnvCfg(
            env_type='vln_pe',
            env_settings={
                'use_fabric': False,
                'headless': True,  # display option: set to False will open isaac-sim interactive window
            },
        ),
        task=TaskCfg(
            task_name='test_evaluator',
            task_settings={
                'env_num': 2,
                'use_distributed': False,  # Ray distributed framework
                'proc_num': 8,
            },
            scene=SceneCfg(
                scene_type='mp3d',
                scene_data_dir='data/scene_data/mp3d_pe',
            ),
            robot_name='h1',
            robot_usd_path='data/Embodiments/vln-pe/h1/h1_vln_pointcloud.usd',
            camera_resolution=[256, 256],  # (W,H)
            camera_prim_path='torso_link/h1_pano_camera_0',
        ),
        dataset=EvalDatasetCfg(
            dataset_type="mp3d",
            dataset_settings={
                'base_data_dir': 'data/vln_pe/raw_data/r2r',
                'split_data_types': ['val_unseen', 'val_seen'],
                'filter_stairs': False,
            },
        ),
        eval_settings={'save_to_json': False, 'vis_output': False},  # save result to video under logs/
    )
    print(eval_cfg)

    # Disabled evaluator run: init => reset => warm up => one fake 'stand_still'
    # step. Requires Isaac Sim / GPU infrastructure not available in CI.
    # cfg = get_config(eval_cfg)
    # try:
    #     evaluator = Evaluator.init(cfg)
    # except Exception as e:
    #     print(e)

    # print('--- VlnPeEvaluator start ---')
    # obs, reset_info = evaluator.env.reset()
    # for info in reset_info:
    #     if info is None:
    #         continue
    #     progress_log_multi_util.trace_start(
    #         trajectory_id=evaluator.now_path_key(info),
    #     )

    # obs = evaluator.warm_up()
    # evaluator.fake_obs = obs[0][evaluator.robot_name]
    # action = [{evaluator.robot_name: {'stand_still': []}} for _ in range(evaluator.env_num * evaluator.proc_num)]
    # obs = evaluator._obs_remove_robot_name(obs)
    # evaluator.runner_status = np.full(
    #     (evaluator.env_num * evaluator.proc_num),
    #     runner_status_code.NORMAL,
    #     runner_status_code,
    # )
    # evaluator.runner_status[[info is None for info in reset_info]] = runner_status_code.TERMINATED

    # while evaluator.env.is_running():

    #     obs, terminated = evaluator.env_step(action)
    #     break

    # evaluator.env.close()
if __name__ == '__main__':
    # Run the test and convert any failure into a non-zero exit code,
    # printing the exception plus a full traceback for CI logs.
    try:
        main()
    except Exception as e:
        import sys
        import traceback

        print(f'exception is {e}')
        traceback.print_exc()
        sys.exit(1)

tests/unit_test/test_basic.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import math
2+
3+
import pytest
4+
5+
6+
def add(a, b):
    """Return ``a + b`` (tiny helper used by the smoke tests below)."""
    total = a + b
    return total
8+
9+
10+
def test_add_works():
    """`add` sums two small integers."""
    expected = 3
    assert add(1, 2) == expected
12+
13+
14+
@pytest.mark.parametrize("x,expected", [(0, 0.0), (math.pi, 0.0)])
def test_sin(x, expected):
    """sin(x) matches *expected* within an absolute tolerance of 1e-9."""
    actual = math.sin(x)
    assert math.isclose(actual, expected, abs_tol=1e-9)
17+
18+
19+
@pytest.mark.slow
def test_slow_example():
    # Pretend this is slow (stand-in for a genuinely long-running test).
    total = sum(range(10000))
    assert total > 0
23+
24+
25+
@pytest.mark.gpu
def test_gpu_feature():
    """Round-trip a scalar tensor through a CUDA device."""
    torch = pytest.importorskip("torch")

    if not torch.cuda.is_available():
        pytest.skip("No CUDA available")
    x = torch.tensor([1.0], device="cuda")
    assert float(x.item()) == 1.0

0 commit comments

Comments
 (0)