Skip to content

Commit ebfd643

Browse files
committed
ci+docs: prep v1.4.1 and fix GitHub Actions
1 parent ff4de20 commit ebfd643

File tree

5 files changed

+81
-124
lines changed

5 files changed

+81
-124
lines changed

.github/workflows/ci-cd.yml

Lines changed: 40 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -2,25 +2,20 @@ name: CI/CD Pipeline
22

33
on:
44
push:
5-
branches: [main, develop]
5+
branches: [master, main, develop]
66
pull_request:
7-
branches: [main, develop]
7+
branches: [master, main, develop]
88
release:
99
types: [published]
10+
workflow_dispatch:
1011

1112
jobs:
1213
test:
1314
runs-on: ${{ matrix.os }}
1415
strategy:
1516
matrix:
1617
os: [ubuntu-latest, windows-latest, macos-latest]
17-
python-version: [3.8, 3.9, "3.10", "3.11"]
18-
exclude:
19-
# Exclude some combinations to reduce job count
20-
- os: macos-latest
21-
python-version: 3.8
22-
- os: windows-latest
23-
python-version: 3.8
18+
python-version: ["3.12"]
2419

2520
steps:
2621
- uses: actions/checkout@v4
@@ -30,6 +25,11 @@ jobs:
3025
with:
3126
python-version: ${{ matrix.python-version }}
3227

28+
- name: Set up uv
29+
uses: astral-sh/setup-uv@v5
30+
with:
31+
enable-cache: true
32+
3333
- name: Install system dependencies (Ubuntu)
3434
if: matrix.os == 'ubuntu-latest'
3535
run: |
@@ -47,35 +47,22 @@ jobs:
4747
run: |
4848
choco install ffmpeg cmake
4949
50-
- name: Cache pip dependencies
51-
uses: actions/cache@v3
52-
with:
53-
path: ~/.cache/pip
54-
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
55-
restore-keys: |
56-
${{ runner.os }}-pip-
57-
5850
- name: Install dependencies
5951
run: |
60-
python -m pip install --upgrade pip
61-
pip install -e .[dev]
52+
uv sync --dev
6253
63-
- name: Lint with flake8
54+
- name: Lint (ruff - minimal)
6455
run: |
65-
flake8 src tests --count --select=E9,F63,F7,F82 --show-source --statistics
66-
flake8 src tests --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics
67-
68-
- name: Format check with black
69-
run: |
70-
black --check src tests
56+
uv run ruff check src/videoannotator tests --select=E9,F63,F7,F82
7157
7258
- name: Type check with mypy
59+
if: github.event_name == 'workflow_dispatch'
7360
run: |
74-
mypy src
61+
uv run mypy src/videoannotator
7562
7663
- name: Test with pytest
7764
run: |
78-
pytest tests/ -v --cov=src --cov-report=xml --cov-report=term-missing
65+
uv run pytest -q --cov=src/videoannotator --cov-report=xml --cov-report=term-missing
7966
8067
- name: Upload coverage to Codecov
8168
uses: codecov/codecov-action@v3
@@ -88,15 +75,20 @@ jobs:
8875
integration-test:
8976
runs-on: ubuntu-latest
9077
needs: test
91-
if: github.event_name == 'push' || github.event_name == 'pull_request'
78+
if: github.event_name == 'workflow_dispatch'
9279

9380
steps:
9481
- uses: actions/checkout@v4
9582

96-
- name: Set up Python 3.9
83+
- name: Set up Python 3.12
9784
uses: actions/setup-python@v4
9885
with:
99-
python-version: 3.9
86+
python-version: "3.12"
87+
88+
- name: Set up uv
89+
uses: astral-sh/setup-uv@v5
90+
with:
91+
enable-cache: true
10092

10193
- name: Install system dependencies
10294
run: |
@@ -106,37 +98,34 @@ jobs:
10698
10799
- name: Install dependencies
108100
run: |
109-
python -m pip install --upgrade pip
110-
pip install -e .[all]
111-
112-
- name: Download test data
113-
run: |
114-
# Create test data directory
115-
mkdir -p tests/data
116-
# Download sample video (replace with actual test data)
117-
curl -o tests/data/sample_video.mp4 "https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4"
101+
uv sync --dev
118102
119103
- name: Run integration tests
120104
run: |
121-
pytest tests/ -v -m integration --cov=src --cov-report=xml
105+
uv run pytest -q -m integration
122106
123107
- name: Test CLI interface
124108
run: |
125-
uv run python -m src.cli --help
126-
uv run python -m src.cli server --help
109+
uv run videoannotator --help
110+
uv run videoannotator server --help
127111
128112
performance-test:
129113
runs-on: ubuntu-latest
130114
needs: test
131-
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
115+
if: github.event_name == 'workflow_dispatch'
132116

133117
steps:
134118
- uses: actions/checkout@v4
135119

136-
- name: Set up Python 3.9
120+
- name: Set up Python 3.12
137121
uses: actions/setup-python@v4
138122
with:
139-
python-version: 3.9
123+
python-version: "3.12"
124+
125+
- name: Set up uv
126+
uses: astral-sh/setup-uv@v5
127+
with:
128+
enable-cache: true
140129

141130
- name: Install system dependencies
142131
run: |
@@ -146,17 +135,11 @@ jobs:
146135
147136
- name: Install dependencies
148137
run: |
149-
python -m pip install --upgrade pip
150-
pip install -e .[all]
151-
152-
- name: Download test data
153-
run: |
154-
mkdir -p tests/data
155-
curl -o tests/data/sample_video.mp4 "https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4"
138+
uv sync --dev
156139
157140
- name: Run performance tests
158141
run: |
159-
pytest tests/ -v -m performance --benchmark-only --benchmark-json=benchmark.json
142+
uv run pytest -q -m performance --benchmark-only --benchmark-json=benchmark.json
160143
161144
- name: Store benchmark result
162145
uses: benchmark-action/github-action-benchmark@v1
@@ -195,10 +178,10 @@ jobs:
195178
steps:
196179
- uses: actions/checkout@v4
197180

198-
- name: Set up Python 3.9
181+
- name: Set up Python 3.12
199182
uses: actions/setup-python@v4
200183
with:
201-
python-version: 3.9
184+
python-version: "3.12"
202185

203186
- name: Install build dependencies
204187
run: |
@@ -222,7 +205,7 @@ jobs:
222205
docker-build:
223206
runs-on: ubuntu-latest
224207
needs: [test, integration-test]
225-
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop')
208+
if: github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop') && secrets.DOCKER_USERNAME != '' && secrets.DOCKER_PASSWORD != ''
226209

227210
steps:
228211
- uses: actions/checkout@v4

CHANGELOG.md

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,16 +15,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1515
- Benchmark results and performance validation
1616
- Additional contributor documentation improvements
1717

18-
## [1.4.1] - 2025-12-18
18+
## [1.4.1] - 2025-12-26
1919

20-
### Documentation & UX Polish
20+
### Release Quality, Docs, and Developer Experience
21+
22+
#### Added
23+
- **Container/Devcontainer**: Baked `hadolint` into Docker images and devcontainer so pre-commit hooks work reliably.
24+
- **Dockerfiles**: Added `git-lfs` to CPU/GPU Dockerfiles for smoother model/asset workflows.
2125

2226
#### Changed
23-
- **Documentation**: Archived historical/versioned docs under `docs/archive/` and updated inbound links.
24-
- **Documentation**: Standardized examples on the canonical API port `18011` and Docker Compose-first workflows.
25-
- **CLI/Logging**: Removed stale v1.2.0 references and ensured first-run messaging reflects the current package version.
27+
- **Documentation**: Consolidated the JOSS manuscript into `paper/paper.md` and replaced `docs/joss.md` with a pointer to avoid divergence.
28+
- **Repository Hygiene**: Moved top-level helper scripts into organized subfolders under `scripts/` and updated imports to the `videoannotator.*` package namespace.
29+
- **Entrypoints**: Updated `api_server.py` to act as a compatibility wrapper; documentation now recommends using the `videoannotator` CLI.
30+
- **README**: Rationalized repeated setup/install instructions, fixed broken/non-working links, and replaced hard-coded test/coverage claims with CI status.
2631

2732
#### Fixed
33+
- **Docs**: Standardized examples on the canonical API port `18011` and corrected Docker run port mappings.
2834
- **Docs**: Replaced placeholder `docs/usage/accessing_results.md` with a real results retrieval guide.
2935

3036
## [1.4.0] - 2025-12-15

README.md

Lines changed: 24 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
[![Python 3.12+](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/)
66
[![uv](https://img.shields.io/badge/uv-package%20manager-FF4B4B?logo=uv&logoColor=white)](https://github.com/astral-sh/uv)
77
[![Docker](https://img.shields.io/badge/Docker-GPU%20Ready-2496ED?logo=docker&logoColor=white)](https://docs.docker.com/)
8-
[![Tests](https://img.shields.io/badge/tests-720%20passing%20(94.4%25)-success.svg)](tests/)
8+
[![CI](https://github.com/InfantLab/VideoAnnotator/actions/workflows/ci-cd.yml/badge.svg)](https://github.com/InfantLab/VideoAnnotator/actions/workflows/ci-cd.yml)
99
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/InfantLab/VideoAnnotator)
1010

1111
**Automated video analysis toolkit for human interaction research** - Extract comprehensive behavioral annotations from videos using AI pipelines, with an intuitive web interface for visualization and analysis.
@@ -34,7 +34,7 @@ VideoAnnotator provides both **automated processing** and **interactive visualiz
3434
- Supports batch processing and custom configurations
3535
- Outputs standardized JSON data
3636

37-
### 🌐 **[Video Annotation Viewer](https://github.com/InfantLab/video-annotation-viewer)**
37+
### 🌐 **[Video Annotation Viewer](https://github.com/InfantLab/video-annotation-viewer)** (paired repository)
3838

3939
**Interactive web-based visualization tool**
4040

@@ -87,27 +87,27 @@ git clone https://github.com/InfantLab/video-annotation-viewer.git
8787
cd video-annotation-viewer
8888
npm install
8989
npm run dev
90+
```
9091

91-
Note: Ensure Node and NPM are installed. On macOS with Homebrew:
92-
brew install node
92+
Note: Ensure Node and NPM are installed. On macOS with Homebrew: `brew install node`
9393

94-
# Open http://localhost:3000 and load your VideoAnnotator results
95-
```
94+
Open http://localhost:3000 and load your VideoAnnotator results.
9695

9796
**🎉 That's it!** You now have both automated video processing and interactive visualization.
9897

9998
## 🧠 AI Pipelines & Capabilities
10099

101100
Authoritative pipeline metadata (names, tasks, modalities, capabilities) is generated from the registry:
102101

103-
- Pipeline specification table: `docs/pipelines_spec.md` (auto-generated; do not edit by hand)
104-
- Emotion output format spec: `docs/specs/emotion_output_format.md`
102+
- Pipeline specification table: [docs/pipelines_spec.md](docs/pipelines_spec.md) (auto-generated; do not edit by hand)
103+
- Pipeline API endpoint: http://localhost:18011/api/v1/pipelines
104+
- Emotion output format spec: [docs/development/emotion_output_format.md](docs/development/emotion_output_format.md)
105105

106106
Additional Specs:
107107

108-
- Output Naming Conventions: `docs/specs/output_naming_conventions.md` (stable patterns for downstream tooling)
109-
- Emotion Validator Utility: `src/validation/emotion_validator.py` (programmatic validation of `.emotion.json` files)
110-
- CLI Validation: `videoannotator validate-emotion path/to/file.emotion.json` returns non-zero exit on failure
108+
- Output Naming Conventions: [docs/development/output_naming_conventions.md](docs/development/output_naming_conventions.md) (stable patterns for downstream tooling)
109+
- Emotion Validator Utility: [src/videoannotator/validation/emotion_validator.py](src/videoannotator/validation/emotion_validator.py) (programmatic validation of `.emotion.json` files)
110+
- CLI Validation: `uv run videoannotator validate-emotion path/to/file.emotion.json` returns non-zero exit on failure
111111
Client tools (e.g. the Video Annotation Viewer) should rely on those sources or the `/api/v1/pipelines` endpoint rather than hard-coding pipeline assumptions.
112112

113113
### **Person Tracking Pipeline**
@@ -118,7 +118,7 @@ Additional Specs:
118118

119119
### **Face Analysis Pipeline**
120120

121-
- **Technology**: OpenFace 3.0, LAION Face, OpenCV backends
121+
- **Technology**: [OpenFace 3.0](https://github.com/CMU-MultiComp-Lab/OpenFace-3.0), LAION Face ([LAION](https://laion.ai/)), OpenCV backends
122122
- **Outputs**: 68-point landmarks, emotions, action units, gaze direction, head pose
123123
- **Use cases**: Emotional analysis, attention tracking, facial expression studies
124124

@@ -212,70 +212,32 @@ VideoAnnotator generates rich, structured data like this:
212212

213213
## 🔗 Integration & Export
214214

215-
### **Direct Integration**
216-
217-
- **Python**: Import JSON data into pandas, matplotlib, seaborn
218-
- **R**: Load data with jsonlite, analyze with tidyverse
219-
- **MATLAB**: Process JSON with built-in functions
220-
221-
### **Annotation Tools**
222-
223-
- **CVAT**: Computer Vision Annotation Tool integration
224-
- **LabelStudio**: Machine learning annotation platform
225-
- **ELAN**: Linguistic annotation software compatibility
215+
VideoAnnotator produces machine-readable outputs (primarily JSON files and API responses) intended to be easy to consume from common data tools.
226216

227-
### **Analysis Platforms**
217+
- **Python**: Load JSON into pandas / numpy for analysis (see [examples/](examples/))
218+
- **R / MATLAB**: Not currently supported with official helper packages, but the JSON outputs can be consumed using standard JSON readers
219+
- **Visualization**: Use the companion [Video Annotation Viewer](https://github.com/InfantLab/video-annotation-viewer) for interactive playback + overlays
228220

229-
- **Video Annotation Viewer**: Interactive web-based analysis (recommended)
230-
- **Custom dashboards**: Build with our REST API
231-
- **Jupyter notebooks**: Examples included in repository
232-
233-
## 🛠️ Installation & Usage
234-
235-
### **Method 1: Direct Installation (Recommended)**
236-
237-
```bash
238-
# Modern Python environment
239-
curl -LsSf https://astral.sh/uv/install.sh | sh
240-
git clone https://github.com/InfantLab/VideoAnnotator.git
241-
cd VideoAnnotator
242-
uv sync
221+
## 🛠️ Installation Options
243222

244-
# Start processing
245-
uv run videoannotator server --host 0.0.0.0 --port 18011
246-
```
223+
The quickstart above covers the recommended local install via `uv`. For more detail, see the [installation guide](docs/installation/INSTALLATION.md).
247224

248-
### **Method 2: Docker (Production)**
225+
### **Docker (CPU/GPU)**
249226

250227
```bash
251228
# CPU version (lightweight)
252229
docker build -f Dockerfile.cpu -t videoannotator:cpu .
253-
docker run -p 18011:8000 videoannotator:cpu
230+
docker run -p 18011:18011 videoannotator:cpu
254231

255232
# GPU version (faster processing)
256233
docker build -f Dockerfile.gpu -t videoannotator:gpu .
257-
docker run -p 18011:8000 --gpus all videoannotator:gpu
234+
docker run -p 18011:18011 --gpus all videoannotator:gpu
258235

259236
# Development version (pre-cached models)
260237
docker build -f Dockerfile.dev -t videoannotator:dev .
261238
docker run -p 18011:18011 --gpus all videoannotator:dev
262239
```
263240

264-
### **Method 3: Research Platform Integration**
265-
266-
```python
267-
# Python API for custom workflows
268-
from videoannotator import VideoAnnotator
269-
270-
annotator = VideoAnnotator()
271-
results = annotator.process("video.mp4", pipelines=["person", "face"])
272-
273-
# Analyze results
274-
import pandas as pd
275-
df = pd.DataFrame(results['person_tracking'])
276-
print(f"Detected {df['person_id'].nunique()} unique people")
277-
```
278-
279241
## 📚 Documentation & Resources
280242

281243
| Resource | Description |
@@ -284,7 +246,7 @@ print(f"Detected {df['person_id'].nunique()} unique people")
284246
| **[🎮 Live API Testing](http://localhost:18011/docs)** | Interactive API when server is running |
285247
| **[🚀 Getting Started Guide](docs/usage/GETTING_STARTED.md)** | Step-by-step setup and first video |
286248
| **[🔧 Installation Guide](docs/installation/INSTALLATION.md)** | Detailed installation instructions |
287-
| **[⚙️ Pipeline Specifications](docs/usage/pipeline_specs.md)** | Technical pipeline documentation |
249+
| **[⚙️ Pipeline Specifications](docs/pipelines_spec.md)** | Auto-generated pipeline spec table |
288250
| **[🎯 Demo Commands](docs/usage/demo_commands.md)** | Example commands and workflows |
289251

290252
## 👥 Research Applications
@@ -342,7 +304,7 @@ print(f"Detected {df['person_id'].nunique()} unique people")
342304

343305
### **Development**
344306

345-
- **Code quality**: 83% test coverage, modern Python practices
307+
- **Code quality**: Automated linting, typing checks, and tests (see the CI badge above)
346308
- **Documentation**: Comprehensive guides and API documentation
347309
- **CI/CD**: Automated testing and deployment pipelines
348310
- **Standards**: Following research software engineering best practices
@@ -355,7 +317,7 @@ If you use VideoAnnotator in your research, please cite:
355317

356318
```
357319
Addyman, C. (2025). VideoAnnotator: Automated video analysis toolkit for human interaction research.
358-
Zenodo. https://Zenodo. doi.org/10.5281/zenodo.16961751
320+
Zenodo. https://doi.org/10.5281/zenodo.16961751
359321
```
360322

361323
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.16961751.svg)](https://doi.org/10.5281/zenodo.16961751)

0 commit comments

Comments (0)