diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..6f1c3d7 --- /dev/null +++ b/.flake8 @@ -0,0 +1,11 @@ +[flake8] +max-line-length = 120 +extend-ignore = E203, W503 +exclude = + .venv, + .git, + __pycache__, + .pytest_cache, + .mypy_cache, + build, + dist diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..48b5e75 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,33 @@ +--- +name: Bug Report +about: Create a report to help us improve +title: '[BUG] ' +labels: ['bug'] +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Environment (please complete the following information):** + - OS: [e.g. Ubuntu 20.04] + - Python version: [e.g. 3.11] + - OpenProject version: [e.g. 12.0] + - MCP Server version: [e.g. 1.0.0] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..d2fc7b9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature Request +about: Suggest an idea for this project +title: '[FEATURE] ' +labels: ['enhancement'] +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. 
+ +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..f3fa6ac --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,19 @@ +version: 2 +updates: + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "ci" + include: "scope" + + # Python dependencies + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "deps" + include: "scope" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..fef809c --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,30 @@ +## Description +Brief description of the changes in this PR. + +## Type of Change +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] Documentation update +- [ ] Refactoring (no functional changes) +- [ ] Performance improvement +- [ ] Test improvements + +## Testing +- [ ] Unit tests pass +- [ ] E2E tests pass +- [ ] Manual testing completed +- [ ] New tests added for new functionality + +## Checklist +- [ ] Code follows the project's coding standards +- [ ] Self-review completed +- [ ] Code is properly commented +- [ ] Documentation updated (if applicable) +- [ ] No breaking changes (or clearly documented) + +## Related Issues +Fixes #(issue number) + +## Additional Notes +Any additional information that reviewers should know. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b749fed --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,107 @@ +name: CI + +on: + pull_request: + branches: [main] + push: + branches: [main] + +permissions: + contents: read + pull-requests: read + +jobs: + lint: + name: Lint & Format Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Set up Python + run: uv python install 3.12 + + - name: Install dependencies + run: uv sync --frozen --dev + + - name: Lint and format check with Ruff + run: | + uv run ruff check . + uv run ruff format --check . + + - name: Type check with mypy + run: uv run mypy src/server.py + continue-on-error: true # Report issues but don't fail build + + test: + name: Test (Python ${{ matrix.python-version }}) + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Set up Python ${{ matrix.python-version }} + run: uv python install ${{ matrix.python-version }} + + - name: Install dependencies + run: uv sync --frozen --dev + + - name: Create junit directory + run: mkdir -p junit + + - name: Run tests + run: uv run pytest tests/ --junitxml=junit/test-results-${{ matrix.python-version }}.xml + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results-${{ matrix.python-version }} + path: junit/test-results-*.xml + + build: + name: Build Package + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Set up Python + run: uv python 
install 3.12 + + - name: Build package + run: uv build + + - name: Verify build outputs + run: | + test -f dist/*.whl + test -f dist/*.tar.gz + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: dist + path: dist/ diff --git a/.github/workflows/dependency-updates.yml b/.github/workflows/dependency-updates.yml new file mode 100644 index 0000000..a14da55 --- /dev/null +++ b/.github/workflows/dependency-updates.yml @@ -0,0 +1,61 @@ +name: Dependency Updates + +on: + schedule: + - cron: '0 9 * * 1' # Run weekly on Mondays at 9 AM + workflow_dispatch: + +jobs: + update-dependencies: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Update dependencies + run: | + uv lock --upgrade + + - name: Check for changes + id: changes + run: | + if git diff --quiet; then + echo "changed=false" >> $GITHUB_OUTPUT + else + echo "changed=true" >> $GITHUB_OUTPUT + fi + + - name: Create Pull Request + if: steps.changes.outputs.changed == 'true' + uses: peter-evans/create-pull-request@v5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + commit-message: "chore: update dependencies" + title: "chore: update dependencies" + body: | + This PR updates project dependencies to their latest versions. 
+ + ## Changes + - Updated dependencies in `uv.lock` + + ## Testing + - [ ] All tests pass + - [ ] No breaking changes detected + branch: dependency-updates + delete-branch: true + labels: | + dependencies + automated diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml new file mode 100644 index 0000000..62557c8 --- /dev/null +++ b/.github/workflows/e2e-tests.yml @@ -0,0 +1,174 @@ +name: CI/CD Pipeline + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main ] + workflow_dispatch: + +jobs: + lint-and-format: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.11', '3.12'] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Cache uv dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/uv + key: ${{ runner.os }}-uv-${{ hashFiles('**/uv.lock') }} + restore-keys: | + ${{ runner.os }}-uv- + + - name: Install dependencies + run: | + uv sync --extra dev + + - name: Run Black formatter + run: | + uv run black --check --diff --line-length=120 . + + - name: Run Flake8 linter + run: | + uv run flake8 . 
+ + - name: Run pre-commit hooks + run: | + uv run pre-commit run --all-files + + unit-tests: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.11', '3.12'] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Cache uv dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/uv + key: ${{ runner.os }}-uv-${{ hashFiles('**/uv.lock') }} + restore-keys: | + ${{ runner.os }}-uv- + + - name: Install dependencies + run: | + uv sync --extra dev + + - name: Run unit tests + run: | + uv run pytest tests/test_unit.py -v --cov=. --cov-report=xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + file: ./coverage.xml + flags: unittests + name: codecov-umbrella + fail_ci_if_error: false + + e2e-tests: + runs-on: ubuntu-latest + needs: [lint-and-format, unit-tests] + if: always() && (needs.lint-and-format.result == 'success' && needs.unit-tests.result == 'success') + permissions: + contents: read + packages: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Cache uv dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/uv + key: ${{ runner.os }}-uv-${{ hashFiles('**/uv.lock') }} + restore-keys: | + ${{ runner.os }}-uv- + + - name: Install dependencies + run: | + uv sync --extra dev + + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Start OpenProject and MCP Server + run: | + # Generate a test API key + export 
OPENPROJECT_API_KEY="test-api-key-$(date +%s)" + + # Start services + docker compose up -d + + # Wait for services to be ready + timeout 300 bash -c 'until curl -f http://localhost:8080/; do sleep 10; done' + + - name: Run E2E tests + run: | + export OPENPROJECT_URL="http://localhost:8080" + export OPENPROJECT_API_KEY="test-api-key-$(date +%s)" + export MCP_SERVER_URL="http://localhost:8080" + + # Run the E2E test suite + docker compose run --rm test-runner + + - name: Stop services + if: always() + run: | + docker compose down -v + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results + path: | + test-results/ + logs/ + retention-days: 7 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..25624a1 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,80 @@ +name: Publish to PyPI + +on: + push: + tags: + - 'v*' # Trigger on any tag starting with 'v' (e.g., v1.0.0, v1.2.3) + +permissions: + contents: write # Required to create GitHub releases + id-token: write # Required for PyPI trusted publishing + +jobs: + build: + name: Build Distribution + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Set up Python + run: uv python install 3.12 + + - name: Build package + run: uv build + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist + path: dist/ + + publish: + name: Publish to PyPI + runs-on: ubuntu-latest + needs: build + environment: + name: pypi + url: https://pypi.org/project/openproject-mcp-server + + steps: + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + name: dist + path: dist/ + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + attestations: true + + 
create-release: + name: Create GitHub Release + runs-on: ubuntu-latest + needs: publish + permissions: + contents: write + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Create GitHub Release + env: + GH_TOKEN: ${{ github.token }} + run: | + gh release create ${{ github.ref_name }} \ + --title "${{ github.ref_name }}" \ + --generate-notes \ + --latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..08ee259 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,96 @@ +name: Release + +on: + push: + tags: + - 'v*' + workflow_dispatch: + inputs: + version: + description: 'Version to release (e.g., v1.0.0)' + required: true + type: string + +jobs: + release: + runs-on: ubuntu-latest + permissions: + contents: write + packages: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Install dependencies + run: | + uv sync --extra dev + + - name: Run tests + run: | + uv run pytest tests/test_unit.py -v + + - name: Build package + run: | + uv build + + - name: Generate changelog + id: changelog + run: | + # Get the previous tag + PREVIOUS_TAG=$(git describe --tags --abbrev=0 HEAD~1 2>/dev/null || echo "") + + if [ -z "$PREVIOUS_TAG" ]; then + echo "changelog=Initial release" >> $GITHUB_OUTPUT + else + echo "changelog<<EOF" >> $GITHUB_OUTPUT + git log --pretty=format:"- %s" ${PREVIOUS_TAG}..HEAD >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + fi + + - name: Create Release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref_name || inputs.version }} + release_name: Release ${{ github.ref_name || inputs.version }} + body: | + ## Changes + ${{ steps.changelog.outputs.changelog }} + + ## 
Installation + ```bash + pip install openproject-mcp-server + ``` + draft: false + prerelease: ${{ contains(github.ref_name || inputs.version, 'alpha') || contains(github.ref_name || inputs.version, 'beta') || contains(github.ref_name || inputs.version, 'rc') }} + + - name: Upload Release Assets + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./dist/ + asset_name: openproject-mcp-server-${{ github.ref_name || inputs.version }}.tar.gz + asset_content_type: application/gzip + + - name: Publish to PyPI + if: github.event_name == 'push' && !contains(github.ref_name || inputs.version, 'alpha') && !contains(github.ref_name || inputs.version, 'beta') && !contains(github.ref_name || inputs.version, 'rc') + run: | + uv publish + env: + PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml new file mode 100644 index 0000000..4e3dd60 --- /dev/null +++ b/.github/workflows/security.yml @@ -0,0 +1,86 @@ +name: Security + +on: + pull_request: + branches: [main] + push: + branches: [main, develop] + schedule: + - cron: '0 0 * * 0' # Weekly on Sunday at midnight UTC + +permissions: + contents: read + pull-requests: write # For dependency review comments + +jobs: + dependency-review: + name: Dependency Review + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Dependency Review + uses: actions/dependency-review-action@v4 + with: + fail-on-severity: high + + sbom: + name: Generate SBOM + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + - name: Set up Python + run: uv python install 3.12 + + - name: Install dependencies + run: uv sync --frozen --all-extras + + 
- name: Generate SBOM + run: uv run cyclonedx-py environment -o sbom.json --of json + + - name: Upload SBOM + uses: actions/upload-artifact@v4 + with: + name: sbom + path: sbom.json + retention-days: 90 + + codeql: + name: CodeQL Analysis + runs-on: ubuntu-latest + permissions: + security-events: write + actions: read + contents: read + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: python + + - name: Set up uv + uses: astral-sh/setup-uv@v2 + with: + version: "latest" + + - name: Install dependencies + run: uv sync --extra dev + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.gitignore b/.gitignore index 69cc96d..db21474 100644 --- a/.gitignore +++ b/.gitignore @@ -98,7 +98,7 @@ ipython_config.py # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. -#uv.lock +# uv.lock is now tracked for reproducible CI builds # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
@@ -207,4 +207,7 @@ marimo/_lsp/ __marimo__/ # Claude Code configuration -CLAUDE.md \ No newline at end of file +CLAUDE.md + +# GitHub Actions planning document (local use only) +GITHUB_ACTIONS_PLAN.md \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..709580e --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,23 @@ +repos: + - repo: https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + language_version: python3 + args: [--line-length=120] + + - repo: https://github.com/pycqa/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - id: check-merge-conflict + - id: debug-statements + - id: check-docstring-first diff --git a/CI_CD.md b/CI_CD.md new file mode 100644 index 0000000..458b677 --- /dev/null +++ b/CI_CD.md @@ -0,0 +1,125 @@ +# CI/CD Documentation + +## Overview + +This repository uses GitHub Actions for automated testing, security scanning, and publishing. 
+ +## Workflows + +### CI Workflow (`.github/workflows/ci.yml`) + +**Triggers:** Push to main, Pull requests + +**Purpose:** Run tests, linting, type checking, and build verification + +**Jobs:** +- **Lint & Format Check** + - Ruff linting and formatting + - Type checking with mypy +- **Test Matrix** + - Python 3.10, 3.11, 3.12, 3.13 + - Parallel execution across versions +- **Build Package** + - Verifies package builds successfully + - Uploads build artifacts + +**Estimated Runtime:** 3-5 minutes + +--- + +### Publish Workflow (`.github/workflows/publish.yml`) + +**Triggers:** Tags matching `v*` + +**Purpose:** Automated PyPI publishing via Trusted Publishing + +**Jobs:** +- **Build Distribution** - Creates wheel and source distribution +- **Publish to PyPI** - Publishes with digital attestations (PEP 740) +- **Create GitHub Release** - Auto-generates release notes + +**Estimated Runtime:** 2-3 minutes + +**Features:** +- PyPI Trusted Publishing (no API tokens) +- Digital attestations for supply chain security +- Automatic GitHub release creation + +--- + +### Security Workflow (`.github/workflows/security.yml`) + +**Triggers:** Push to main, Pull requests, Weekly schedule (Sundays) + +**Purpose:** Dependency scanning, SBOM generation, code analysis + +**Jobs:** +- **Dependency Review** - Blocks PRs with high-severity vulnerabilities +- **SBOM Generation** - CycloneDX format for compliance +- **CodeQL Analysis** - Code-level security scanning + +**Estimated Runtime:** 5-10 minutes + +**Features:** +- Dependency vulnerability blocking +- Software Bill of Materials (SBOM) +- CodeQL for detecting security issues +- Weekly automated scans + +--- + +## Technologies + +| Tool | Purpose | +|------|---------| +| **uv** | Fast Python package manager | +| **Ruff** | Modern linting and formatting | +| **mypy** | Static type checking | +| **pytest** | Testing framework | +| **CodeQL** | Security code scanning | +| **PyPI Trusted Publishing** | Secure publishing (OIDC) | + +## 
Workflow Status + +Check workflow runs: https://github.com/kingfly55/openproject-mcp-server/actions + +## Local Testing + +### Run CI checks locally: + +```bash +# Install dev dependencies +uv sync --dev + +# Run linting +uv run ruff check . +uv run ruff format --check . + +# Run type checking +uv run mypy src/server.py + +# Run tests +uv run pytest tests/ + +# Build package +uv build +``` + +### Test with act: + +```bash +# Install act (GitHub Actions local runner) +brew install act # macOS +# or: https://github.com/nektos/act + +# Run CI workflow locally +act push -W .github/workflows/ci.yml +``` + +## Configuration Files + +- `.github/workflows/ci.yml` - CI workflow +- `.github/workflows/publish.yml` - Publishing workflow +- `.github/workflows/security.yml` - Security scanning +- `.github/dependabot.yml` - Automated dependency updates +- `pyproject.toml` - Project config + tool settings diff --git a/Dockerfile b/Dockerfile index bb683be..ba8a4ca 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ -FROM python:3.14-slim +FROM python:3.11-slim WORKDIR /app RUN python -m venv .venv COPY requirements.txt . RUN pip install -r requirements.txt COPY . . -CMD ["python", "openproject-mcp.py"] \ No newline at end of file +CMD ["python", "openproject-mcp-fastmcp.py"] \ No newline at end of file diff --git a/Dockerfile.test b/Dockerfile.test new file mode 100644 index 0000000..7747288 --- /dev/null +++ b/Dockerfile.test @@ -0,0 +1,31 @@ +FROM python:3.11-slim + +# Set working directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + curl \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Install additional testing dependencies +RUN pip install --no-cache-dir \ + pytest \ + pytest-asyncio \ + requests \ + aiohttp + +# Copy the application and tests +COPY . . 
+ +# Create a non-root user +RUN useradd -m -u 1000 testuser && chown -R testuser:testuser /app +USER testuser + +# Default command +CMD ["python", "tests/e2e_test.py"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0b7a471 --- /dev/null +++ b/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2025 AndyEverything (Original Author) +Copyright (c) 2025 Compass Rose Systems (Fork Maintainer) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md index a383d98..3f8ab3e 100644 --- a/README.md +++ b/README.md @@ -17,14 +17,68 @@ A Model Context Protocol (MCP) server that provides seamless integration with [O - ๐Ÿš€ **Async Operations**: Built with modern async/await patterns - ๐Ÿ“Š **Comprehensive Logging**: Configurable logging levels -## Prerequisites +## Quick Start (uvx / pip) + +Install and run without cloning the repository: + +```bash +# Run directly with uvx (no installation required) +uvx openproject-mcp-server + +# Or install globally with pip +pip install openproject-mcp-server +``` + +### Claude Code configuration + +Add to `~/.claude.json` under `mcpServers`: + +```json +{ + "mcpServers": { + "openproject": { + "type": "stdio", + "command": "uvx", + "args": ["openproject-mcp-server"], + "env": { + "OPENPROJECT_URL": "https://your-instance.openproject.com", + "OPENPROJECT_API_KEY": "your-api-key-here" + } + } + } +} +``` + +### Claude Desktop configuration + +**macOS:** `~/Library/Application Support/Claude/claude_desktop_config.json` +**Windows:** `%APPDATA%\Claude\claude_desktop_config.json` + +```json +{ + "mcpServers": { + "openproject": { + "command": "uvx", + "args": ["openproject-mcp-server"], + "env": { + "OPENPROJECT_URL": "https://your-instance.openproject.com", + "OPENPROJECT_API_KEY": "your-api-key-here" + } + } + } +} +``` + +--- + +## Prerequisites (for development / source install) - Python 3.10 or higher - [uv](https://docs.astral.sh/uv/) (fast Python package manager) - An OpenProject instance (cloud or self-hosted) - OpenProject API key (generated from your user profile) -## Installation +## Installation (from source) ### 1. Install uv (if not already installed) diff --git a/RELEASING.md b/RELEASING.md new file mode 100644 index 0000000..478df89 --- /dev/null +++ b/RELEASING.md @@ -0,0 +1,91 @@ +# Release Process + +This document explains how to release a new version of openproject-mcp-server. 
+ +## Prerequisites + +- Push access to main branch +- PyPI Trusted Publisher configured for openproject-mcp-server +- Admin access to repository (for creating tags) + +## Release Steps + +### 1. Update Version + +Edit `pyproject.toml`: +```toml +[project] +version = "1.0.3" # Update this +``` + +### 2. Commit and Tag + +```bash +git add pyproject.toml +git commit -m "Bump version to 1.0.3" +git tag v1.0.3 +git push origin main --tags +``` + +### 3. Automated Publishing + +The publish workflow automatically: +1. Builds the package +2. Publishes to PyPI with Trusted Publishing +3. Creates digital attestations (PEP 740) +4. Creates GitHub release with auto-generated notes + +Monitor: https://github.com/kingfly55/openproject-mcp-server/actions/workflows/publish.yml + +## Versioning + +This project follows [Semantic Versioning](https://semver.org/). + +- **MAJOR** version for incompatible API changes +- **MINOR** version for backwards-compatible functionality +- **PATCH** version for backwards-compatible bug fixes + +## PyPI Trusted Publisher + +Ensure Trusted Publisher is configured at https://pypi.org/manage/account/publishing/ + +Configuration: +- PyPI Project Name: `openproject-mcp-server` +- Owner: `kingfly55` +- Repository: `openproject-mcp-server` +- Workflow: `publish.yml` +- Environment: `pypi` + +## Troubleshooting + +### Tag Not Triggering Publish + +**Check:** +- Tag matches pattern `v*` (e.g., v1.0.3) +- Tag was pushed to GitHub +- Workflow file exists at `.github/workflows/publish.yml` + +### Publish Fails - Trusted Publishing + +**Solutions:** +1. Verify PyPI Trusted Publisher settings match exactly +2. Check environment `pypi` exists in repository settings +3. Ensure workflow has `id-token: write` permission + +### Version Already Published + +**Error:** Package version already exists on PyPI + +**Solution:** +1. Bump version to next patch/minor/major +2. Create new tag +3. 
Never reuse version numbers + +## First Release + +For the first release after setting up Trusted Publishing: + +1. Configure pending publisher on PyPI (see above) +2. Create and push tag +3. Workflow will establish the publisher relationship +4. Future releases work automatically diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 0000000..e33e566 --- /dev/null +++ b/TESTING.md @@ -0,0 +1,223 @@ +# Testing + +This project includes comprehensive testing setup with both unit tests and end-to-end tests using Docker Compose. + +## Pre-commit Hooks + +This project uses pre-commit hooks to ensure code quality and consistency. The hooks include: + +- **Black**: Code formatting +- **Flake8**: Linting and style checking +- **Pre-commit hooks**: Various checks for trailing whitespace, file endings, YAML syntax, etc. + +### Setup Pre-commit Hooks + +```bash +# Install pre-commit hooks +uv run pre-commit install + +# Run all hooks manually +uv run pre-commit run --all-files + +# Run specific hook +uv run pre-commit run black +uv run pre-commit run flake8 +``` + +### Configuration + +- **Black**: Configured with 120 character line length (see `.pre-commit-config.yaml`) +- **Flake8**: Configured in `.flake8` file with 120 character line length and exclusions for `.venv`, `.git`, etc. 
+ +## Test Structure + +- **Unit Tests** (`tests/test_unit.py`): Test individual components without external dependencies +- **E2E Tests** (`tests/e2e_test.py`): Test complete functionality against a real OpenProject instance +- **Test Data Setup** (`tests/setup_test_data.py`): Script to set up test data in OpenProject + +## Running Tests + +### Prerequisites + +- Docker and Docker Compose installed +- Python 3.11+ (for local unit tests) + +### Unit Tests + +Run unit tests locally: + +```bash +# Install dependencies +uv sync --extra dev + +# Run unit tests +uv run pytest tests/test_unit.py -v +``` + +### End-to-End Tests + +Run the complete E2E test suite using Docker Compose: + +```bash +# Make the test runner executable +chmod +x run-e2e-tests.sh + +# Run E2E tests +./run-e2e-tests.sh +``` + +Or run manually: + +```bash +# Start services +docker-compose up -d + +# Wait for OpenProject to be ready +timeout 300 bash -c 'until curl -f http://localhost:8080/api/v3; do sleep 10; done' + +# Run tests +docker-compose run --rm test-runner + +# Cleanup +docker-compose down -v +``` + +### Individual Test Components + +You can also run individual components: + +```bash +# Start only OpenProject +docker-compose up -d postgres redis openproject + +# Run test data setup +docker-compose run --rm test-runner python tests/setup_test_data.py + +# Run specific test +docker-compose run --rm test-runner python tests/e2e_test.py +``` + +## Test Configuration + +### Environment Variables + +The E2E tests use these environment variables: + +- `OPENPROJECT_URL`: OpenProject instance URL (default: http://localhost:8080) +- `OPENPROJECT_API_KEY`: API key for authentication (auto-generated for tests) +- `MCP_SERVER_URL`: MCP server URL (default: http://localhost:8080) +- `LOG_LEVEL`: Logging level (default: DEBUG) + +### Docker Services + +The test setup includes: + +- **PostgreSQL**: Database for OpenProject +- **Redis**: Caching and background jobs +- **OpenProject**: Full OpenProject 
instance +- **MCP Server**: The MCP server being tested +- **Test Runner**: Executes the test suite + +## Test Coverage + +The E2E tests cover: + +- โœ… API connection testing +- โœ… Project listing and filtering +- โœ… User management +- โœ… Work package type management +- โœ… Priority and status management +- โœ… Work package creation and listing +- โœ… Meeting creation and management +- โœ… Error handling and edge cases + +## Continuous Integration + +GitHub Actions automatically runs the E2E test suite on: + +- Push to main/develop branches +- Pull requests to main +- Manual workflow dispatch + +The CI pipeline: + +1. Sets up Python environment +2. Installs dependencies with uv +3. Starts Docker services +4. Runs E2E tests +5. Cleans up resources +6. Uploads test results + +## Troubleshooting Tests + +### Common Issues + +1. **OpenProject not ready**: Increase the timeout in the test script +2. **Port conflicts**: Ensure ports 8080 and 5432 are available +3. **Permission errors**: Check Docker permissions and file ownership +4. 
**Import errors**: Ensure the MCP server module is properly installed
+
+### Debug Mode
+
+Enable debug logging:
+
+```bash
+export LOG_LEVEL=DEBUG
+docker-compose run --rm test-runner python tests/e2e_test.py
+```
+
+### Manual Testing
+
+You can manually test the MCP server:
+
+```bash
+# Start services
+docker-compose up -d
+
+# Wait for OpenProject
+curl -f http://localhost:8080/api/v3
+
+# Test MCP server
+docker-compose exec mcp-server python -c "
+import asyncio
+from openproject_mcp import OpenProjectMCPServer, OpenProjectClient
+async def test():
+    server = OpenProjectMCPServer()
+    server.client = OpenProjectClient('http://openproject:8080', 'test-api-key')
+    result = await server.call_tool('test_connection', {})
+    print(result[0].text)
+asyncio.run(test())
+"
+```
+
+## Adding New Tests
+
+### Unit Tests
+
+Add new unit tests to `tests/test_unit.py`:
+
+```python
+def test_new_feature():
+    """Test new feature"""
+    # Test implementation
+    assert True
+```
+
+### E2E Tests
+
+Add new E2E tests to `tests/e2e_test.py`:
+
+```python
+async def test_new_feature(self):
+    """Test new feature end-to-end"""
+    logger.info("Testing new feature...")
+
+    result = await self.mcp_client.call_tool("new_tool", {})
+
+    assert "content" in result
+    assert "expected result" in result["content"][0].text
+
+    logger.info("✅ New feature test passed")
+```
+
+Don't forget to add the test to the `run_all_tests()` method. diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md new file mode 100644 index 0000000..bccb959 --- /dev/null +++ b/TROUBLESHOOTING.md @@ -0,0 +1,286 @@ +# CI/CD Troubleshooting
+
+## CI Workflow Issues
+
+### Ruff Linting Fails
+
+**Error:** Linting violations found
+
+**Solutions:**
+```bash
+# Run locally to see issues
+uv run ruff check .
+
+# Auto-fix issues
+uv run ruff check --fix .
+
+# Format code
+uv run ruff format .
+```
+
+### Ruff Format Check Fails
+
+**Error:** Files would be reformatted
+
+**Solution:**
+```bash
+# Format code
+uv run ruff format . 
+ +# Commit changes +git add . +git commit -m "Format code with Ruff" +``` + +### mypy Type Checking Fails + +**Error:** Type check errors + +**Solutions:** +```bash +# Run locally +uv run mypy src/server.py + +# Option 1: Fix type errors +# Option 2: Add type ignore comments for false positives +# type: ignore[error-code] + +# Option 3: Adjust mypy settings in pyproject.toml +``` + +### Tests Fail on Specific Python Version + +**Error:** Tests pass locally but fail on CI for specific version + +**Solutions:** +```bash +# Install specific Python version locally +uv python install 3.10 + +# Run tests with that version +uv run --python 3.10 pytest tests/ + +# Check for version-specific issues +``` + +### Build Fails + +**Error:** Package build errors + +**Solutions:** +```bash +# Build locally +uv build + +# Check for: +# - Missing files in git +# - Invalid pyproject.toml +# - Build backend issues +``` + +--- + +## Publish Workflow Issues + +### Trusted Publishing Fails + +**Error:** `OIDC token retrieval failed` or `Trusted publishing exchange failure` + +**Possible Causes:** +1. PyPI Trusted Publisher not configured +2. Workflow name doesn't match (`publish.yml`) +3. Environment name doesn't match (`pypi`) +4. Repository/owner mismatch + +**Solution:** +```bash +# Verify PyPI configuration at: +# https://pypi.org/manage/account/publishing/ + +# Check configuration matches: +PyPI Project Name: openproject-mcp-server +Owner: kingfly55 +Repository: openproject-mcp-server +Workflow name: publish.yml +Environment name: pypi +``` + +### Permission Denied + +**Error:** `insufficient permissions` or `403 Forbidden` + +**Solutions:** +1. Check `id-token: write` permission in workflow +2. Verify you're repository admin +3. Check environment `pypi` has correct URL +4. 
Ensure environment protection rules allow workflow + +### Version Already Exists + +**Error:** `File already exists` on PyPI + +**Solution:** +```bash +# Bump version in pyproject.toml +[project] +version = "1.0.4" # Increment + +# Never reuse version numbers +# PyPI doesn't allow overwriting +``` + +### Tag Doesn't Trigger Workflow + +**Error:** Tag pushed but workflow doesn't run + +**Solutions:** +1. Verify tag matches pattern `v*` + ```bash + git tag v1.0.3 # Correct + git tag 1.0.3 # Won't trigger + ``` +2. Check tag was pushed + ```bash + git push origin v1.0.3 + ``` +3. Verify workflow file exists and is valid + +--- + +## Security Workflow Issues + +### Dependency Review Blocks PR + +**Error:** High-severity vulnerabilities detected + +**Solutions:** +1. Review vulnerability details in PR +2. Update affected dependency: + ```bash + uv sync --upgrade-package vulnerable-package + ``` +3. If false positive, request manual review +4. Check for available security patches + +### CodeQL Fails + +**Error:** CodeQL analysis fails + +**Solutions:** +1. Check for syntax errors in Python code +2. Verify Python version compatibility +3. Review CodeQL logs in Actions tab +4. May need to exclude certain files + +### SBOM Generation Fails + +**Error:** `cyclonedx-py` command fails + +**Solutions:** +```bash +# Test locally +uv run cyclonedx-py environment -o sbom.json --of json + +# Check cyclonedx-bom is installed +uv sync --dev + +# Verify it's in dev dependencies +``` + +--- + +## General Debugging + +### View Workflow Logs + +1. Go to **Actions** tab in GitHub +2. Click on failed workflow run +3. Click on failed job +4. Expand failed step to see detailed logs + +### Re-run Failed Workflows + +1. In Actions tab, click failed run +2. 
Click **Re-run jobs** โ†’ **Re-run failed jobs** + +### Enable Debug Logging + +Add repository secret `ACTIONS_STEP_DEBUG` = `true` + +Or add to workflow temporarily: +```yaml +env: + ACTIONS_STEP_DEBUG: true +``` + +### Test Workflows Locally + +```bash +# Install act +brew install act # macOS +# or: https://github.com/nektos/act + +# Run specific workflow +act -W .github/workflows/ci.yml + +# Run specific job +act -j lint + +# Use specific runner image +act -P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest +``` + +--- + +## Common Issues + +### uv Cache Issues + +**Error:** Dependencies not updating or cache corruption + +**Solution:** +```bash +# Clear uv cache locally +rm -rf ~/.cache/uv + +# In CI: disable cache temporarily +# Remove or comment out: enable-cache: true +``` + +### Workflow Not Running + +**Check:** +1. Workflow file has valid YAML syntax +2. Triggers are correctly configured +3. Branch/tag names match patterns +4. Repository Actions are enabled + +### Rate Limiting + +**Error:** GitHub API rate limit exceeded + +**Rare but possible:** +- Wait for rate limit reset (usually 1 hour) +- Check if too many workflows running concurrently +- Review Dependabot PR frequency + +--- + +## Getting Help + +**GitHub Actions Issues:** +- Review logs in Actions tab +- Check [GitHub Actions documentation](https://docs.github.com/en/actions) +- Search [GitHub Community forums](https://github.community/) + +**uv Issues:** +- Check [uv documentation](https://docs.astral.sh/uv/) +- Review [uv GitHub issues](https://github.com/astral-sh/uv/issues) + +**PyPI Publishing:** +- [PyPI Help](https://pypi.org/help/) +- [Trusted Publishing docs](https://docs.pypi.org/trusted-publishers/) + +**Ruff Issues:** +- [Ruff documentation](https://docs.astral.sh/ruff/) +- [Configuration reference](https://docs.astral.sh/ruff/configuration/) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..cb2ff61 --- /dev/null +++ b/docker-compose.yml @@ 
-0,0 +1,137 @@ +services: + # PostgreSQL database for OpenProject + postgres: + image: postgres:15 + environment: + POSTGRES_DB: openproject + POSTGRES_USER: openproject + POSTGRES_PASSWORD: openproject + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U openproject -d openproject"] + interval: 10s + timeout: 5s + retries: 5 + networks: + - openproject-network + + # Redis for OpenProject caching and background jobs + redis: + image: redis:7-alpine + volumes: + - redis_data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + networks: + - openproject-network + + # OpenProject application + openproject: + image: openproject/community:13 + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + environment: + # Database configuration + DATABASE_URL: postgres://openproject:openproject@postgres:5432/openproject + + # Redis configuration + REDIS_URL: redis://redis:6379/0 + + # OpenProject configuration + OPENPROJECT_HOST__NAME: localhost:8080 + OPENPROJECT_HTTPS: "false" + OPENPROJECT_SECRET_KEY_BASE: "test-secret-key-base-for-testing-only" + + # Email configuration (disabled for testing) + OPENPROJECT_EMAIL_DELIVERY_METHOD: none + + # Security configuration + OPENPROJECT_SECURE__COOKIES: "false" + + # Production settings for testing + RAILS_ENV: production + RAILS_LOG_LEVEL: info + + # Admin user configuration + OPENPROJECT_ADMIN__USER__NAME: admin + OPENPROJECT_ADMIN__USER__EMAIL: admin@example.com + OPENPROJECT_ADMIN__USER__PASSWORD: admin123 + + # Skip initial setup wizard + OPENPROJECT_SKIP__BROWSER__RELOAD: "true" + ports: + - "8080:8080" + volumes: + - openproject_data:/var/openproject + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/"] + interval: 30s + timeout: 10s + retries: 10 + start_period: 60s + networks: + - openproject-network + + # MCP Server + mcp-server: + build: + context: . 
+ dockerfile: Dockerfile + depends_on: + openproject: + condition: service_healthy + environment: + # OpenProject configuration + OPENPROJECT_URL: http://openproject:8080 + OPENPROJECT_API_KEY: ${OPENPROJECT_API_KEY:-test-api-key} + + # Logging + LOG_LEVEL: DEBUG + TEST_CONNECTION_ON_STARTUP: "true" + volumes: + - .:/app + networks: + - openproject-network + command: ["python", "/app/openproject-mcp.py"] + + # Test runner + test-runner: + build: + context: . + dockerfile: Dockerfile.test + depends_on: + openproject: + condition: service_healthy + mcp-server: + condition: service_started + environment: + # Test configuration + OPENPROJECT_URL: http://openproject:8080 + OPENPROJECT_API_KEY: ${OPENPROJECT_API_KEY:-test-api-key} + MCP_SERVER_URL: http://mcp-server:8080 + + # Test data + TEST_PROJECT_NAME: "E2E Test Project" + TEST_USER_EMAIL: "test@example.com" + TEST_USER_PASSWORD: "test123" + volumes: + - .:/app + networks: + - openproject-network + command: ["python", "/app/tests/simple_e2e_test.py"] + +volumes: + postgres_data: + redis_data: + openproject_data: + +networks: + openproject-network: + driver: bridge diff --git a/env_example.txt b/env_example.txt index 171ee8b..c11140a 100644 --- a/env_example.txt +++ b/env_example.txt @@ -17,3 +17,7 @@ LOG_LEVEL=INFO # Optional: Test connection on startup (true/false) TEST_CONNECTION_ON_STARTUP=true + +# Optional: Read-only mode โ€” blocks ALL write operations (POST/PATCH/PUT/DELETE) +# When true, create/update/delete tools return an error; read tools work normally +READ_ONLY_MODE=false diff --git a/pyproject.toml b/pyproject.toml index f426c81..1aef9d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ] dependencies = [ @@ -30,13 +31,22 @@ dependencies = [ "starlette>=0.27.0", # Required for SSE transport ] 
+[project.scripts] +openproject-mcp = "src.server:main" + [project.optional-dependencies] dev = [ "pytest>=7.0.0", - "black>=22.0.0", - "flake8>=4.0.0", + "pytest-asyncio>=0.21.0", + "ruff>=0.1.0", + "mypy>=1.0.0", + "cyclonedx-bom>=4.0.0", ] +[tool.pytest.ini_options] +asyncio_mode = "auto" +testpaths = ["tests"] + [build-system] requires = ["hatchling"] build-backend = "hatchling.build" @@ -44,10 +54,24 @@ build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] packages = ["src"] -[tool.black] -line-length = 88 -target-version = ['py38'] +[tool.ruff] +line-length = 120 +target-version = "py310" + +[tool.ruff.lint] +select = ["E", "F", "I", "N", "W"] +ignore = ["E203", "E501"] -[tool.flake8] -max-line-length = 88 -extend-ignore = ["E203", "W503"] +[tool.mypy] +python_version = "3.10" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = false + +[dependency-groups] +dev = [ + "cyclonedx-bom>=7.2.1", + "mypy>=1.19.1", + "pytest>=9.0.2", + "ruff>=0.14.10", +] diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..bd471a1 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,12 @@ +[tool:pytest] +minversion = 6.0 +addopts = -ra -q --strict-markers +testpaths = tests +python_files = test_*.py *_test.py +python_classes = Test* +python_functions = test_* +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + integration: marks tests as integration tests + e2e: marks tests as end-to-end tests +asyncio_mode = auto diff --git a/run-e2e-tests.sh b/run-e2e-tests.sh new file mode 100755 index 0000000..a8d1273 --- /dev/null +++ b/run-e2e-tests.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# E2E Test Runner Script + +set -e + +echo "๐Ÿš€ Starting OpenProject MCP Server E2E Tests" + +# Check if Docker is running +if ! docker info > /dev/null 2>&1; then + echo "โŒ Docker is not running. Please start Docker and try again." 
+ exit 1
+fi
+
+# Generate test API key
+export OPENPROJECT_API_KEY="test-api-key-$(date +%s)"
+echo "🔑 Generated test API key: $OPENPROJECT_API_KEY"
+
+# Create .env file for testing
+cat > .env.test << EOF
+OPENPROJECT_URL=http://localhost:8080
+OPENPROJECT_API_KEY=$OPENPROJECT_API_KEY
+LOG_LEVEL=DEBUG
+TEST_CONNECTION_ON_STARTUP=true
+EOF
+
+echo "📝 Created test environment file"
+
+# Start services
+echo "🐳 Starting Docker services..."
+docker compose --env-file .env.test up -d
+
+# Wait for OpenProject to be ready
+echo "⏳ Waiting for OpenProject to be ready..."
+timeout 300 bash -c 'until curl -f http://localhost:8080/ > /dev/null 2>&1; do
+    echo "Waiting for OpenProject..."
+    sleep 10
+done'
+
+echo "✅ OpenProject is ready!"
+
+# Wait a bit more for full initialization
+sleep 30
+
+# Run tests
+echo "🧪 Running simplified E2E tests..."
+# Capture exit code; the "||" guard keeps set -e from aborting before cleanup runs
+TEST_EXIT_CODE=0
+docker compose --env-file .env.test run --rm test-runner || TEST_EXIT_CODE=$?
+
+# Cleanup
+echo "🧹 Cleaning up..."
+docker compose --env-file .env.test down -v
+rm -f .env.test
+
+# Exit with test result
+if [ $TEST_EXIT_CODE -eq 0 ]; then
+    echo "🎉 All tests passed!" 
+else + echo "โŒ Tests failed with exit code $TEST_EXIT_CODE" +fi + +exit $TEST_EXIT_CODE diff --git a/src/client.py b/src/client.py index 1d6e8f5..55930fc 100644 --- a/src/client.py +++ b/src/client.py @@ -21,11 +21,24 @@ # Version information __version__ = "2.0.0" +# HTTP methods that modify data โ€” blocked when readonly=True +_WRITE_METHODS = frozenset({"POST", "PATCH", "PUT", "DELETE"}) +# Retry configuration +_MAX_RETRIES = 3 +_RETRY_BASE_DELAY = 1.0 # seconds (doubled on each attempt: 1s, 2s, 4s) +_RETRYABLE_STATUSES = {500, 502, 503, 504} + class OpenProjectClient: """Client for the OpenProject API v3 with optional proxy support""" - def __init__(self, base_url: str, api_key: str, proxy: Optional[str] = None): + def __init__( + self, + base_url: str, + api_key: str, + proxy: Optional[str] = None, + readonly: bool = False, + ): """ Initialize the OpenProject client. @@ -33,10 +46,12 @@ def __init__(self, base_url: str, api_key: str, proxy: Optional[str] = None): base_url: The base URL of the OpenProject instance api_key: API key for authentication proxy: Optional HTTP proxy URL + readonly: If True, POST/PATCH/PUT/DELETE requests are blocked """ self.base_url = base_url.rstrip("/") self.api_key = api_key self.proxy = proxy + self.readonly = readonly # Setup headers with Basic Auth self.headers = { @@ -49,6 +64,8 @@ def __init__(self, base_url: str, api_key: str, proxy: Optional[str] = None): logger.info(f"OpenProject Client initialized for: {self.base_url}") if self.proxy: logger.info(f"Using proxy: {self.proxy}") + if self.readonly: + logger.info("Read-only mode ENABLED: write requests will be blocked") def _encode_api_key(self) -> str: """Encode API key for Basic Auth""" @@ -59,7 +76,10 @@ async def _request( self, method: str, endpoint: str, data: Optional[Dict] = None ) -> Dict: """ - Execute an API request. + Execute an API request with exponential backoff retry. 
+ + Retries automatically on transient server errors (500, 502, 503, 504) + and network failures. Non-retryable errors (4xx) are raised immediately. Args: method: HTTP method (GET, POST, etc.) @@ -70,61 +90,89 @@ async def _request( Dict: Response data from the API Raises: - Exception: If the request fails + Exception: If the request fails after all retry attempts """ + if self.readonly and method.upper() in _WRITE_METHODS: + raise Exception( + f"Read-only mode is enabled: {method.upper()} operations are not " + "permitted. Set READ_ONLY_MODE=false to allow write operations." + ) + url = f"{self.base_url}/api/v3{endpoint}" logger.debug(f"API Request: {method} {url}") if data: logger.debug(f"Request body: {json.dumps(data, indent=2)}") - # Configure SSL and timeout - ssl_context = ssl.create_default_context() - connector = aiohttp.TCPConnector(ssl=ssl_context) - timeout = aiohttp.ClientTimeout(total=30) - - async with aiohttp.ClientSession( - connector=connector, timeout=timeout - ) as session: - try: - # Build request parameters - request_params = { - "method": method, - "url": url, - "headers": self.headers, - "json": data, - } - - # Add proxy if configured - if self.proxy: - request_params["proxy"] = self.proxy - - async with session.request(**request_params) as response: - response_text = await response.text() - - logger.debug(f"Response status: {response.status}") - - # Parse response - try: - response_json = ( - json.loads(response_text) if response_text else {} - ) - except json.JSONDecodeError: - logger.error(f"Invalid JSON response: {response_text[:200]}...") - response_json = {} - - # Handle errors - if response.status >= 400: - error_msg = self._format_error_message( - response.status, response_text - ) - raise Exception(error_msg) - - return response_json - - except aiohttp.ClientError as e: - logger.error(f"Network error: {str(e)}") - raise Exception(f"Network error accessing {url}: {str(e)}") + last_exc: Optional[Exception] = None + + for attempt in 
range(_MAX_RETRIES + 1): + if attempt > 0: + delay = _RETRY_BASE_DELAY * (2 ** (attempt - 1)) + logger.warning( + f"Retry {attempt}/{_MAX_RETRIES} for {method} {url} in {delay:.1f}s" + ) + await asyncio.sleep(delay) + + ssl_context = ssl.create_default_context() + connector = aiohttp.TCPConnector(ssl=ssl_context) + timeout = aiohttp.ClientTimeout(total=30) + + async with aiohttp.ClientSession( + connector=connector, timeout=timeout + ) as session: + try: + request_params = { + "method": method, + "url": url, + "headers": self.headers, + "json": data, + } + + if self.proxy: + request_params["proxy"] = self.proxy + + async with session.request(**request_params) as response: + response_text = await response.text() + + logger.debug(f"Response status: {response.status}") + + try: + response_json = ( + json.loads(response_text) if response_text else {} + ) + except json.JSONDecodeError: + logger.error( + f"Invalid JSON response: {response_text[:200]}..." + ) + response_json = {} + + if response.status >= 400: + error_msg = self._format_error_message( + response.status, response_text + ) + if ( + response.status in _RETRYABLE_STATUSES + and attempt < _MAX_RETRIES + ): + logger.warning( + f"Transient error {response.status} on attempt " + f"{attempt + 1}/{_MAX_RETRIES + 1}, will retry" + ) + last_exc = Exception(error_msg) + else: + raise Exception(error_msg) + else: + return response_json + + except (aiohttp.ClientError, asyncio.TimeoutError) as e: + logger.warning( + f"Network error on attempt {attempt + 1}/{_MAX_RETRIES + 1}: " + f"{str(e)}" + ) + last_exc = Exception(f"Network error accessing {url}: {str(e)}") + + raise last_exc def _format_error_message(self, status: int, response_text: str) -> str: """Format error message based on HTTP status code""" @@ -284,6 +332,18 @@ async def create_work_package(self, data: Dict) -> Dict: if "date" in data: payload["date"] = data["date"] + # Add custom fields (customField1, customField2, etc.) 
+ if "_links" not in payload: + payload["_links"] = {} + for key, value in data.items(): + if key.startswith("customField"): + if isinstance(value, dict) and "href" in value: + # List-type custom field - add to _links + payload["_links"][key] = value + else: + # Text/number custom field - add directly + payload[key] = value + # Create work package return await self._request("POST", "/work_packages", payload) @@ -503,6 +563,18 @@ async def update_work_package(self, work_package_id: int, data: Dict) -> Dict: if "date" in data: payload["date"] = data["date"] + # Add custom fields (customField1, customField2, etc.) + if "_links" not in payload: + payload["_links"] = {} + for key, value in data.items(): + if key.startswith("customField"): + if isinstance(value, dict) and "href" in value: + # List-type custom field - add to _links + payload["_links"][key] = value + else: + # Text/number custom field - add directly + payload[key] = value + return await self._request( "PATCH", f"/work_packages/{work_package_id}", payload ) diff --git a/src/server.py b/src/server.py index 19b7448..7ed50f0 100644 --- a/src/server.py +++ b/src/server.py @@ -33,6 +33,7 @@ base_url = os.getenv("OPENPROJECT_URL") api_key = os.getenv("OPENPROJECT_API_KEY") proxy = os.getenv("OPENPROJECT_PROXY") + readonly = os.getenv("READ_ONLY_MODE", "false").strip().lower() == "true" if not base_url or not api_key: raise ValueError( @@ -42,24 +43,34 @@ _client = OpenProjectClient( base_url=base_url, api_key=api_key, - proxy=proxy + proxy=proxy, + readonly=readonly, ) logger.info(f"โœ… OpenProject MCP Server initialized") logger.info(f" Server: {base_url}") logger.info(f" Proxy: {proxy if proxy else 'None'}") + if readonly: + logger.warning("โš ๏ธ READ-ONLY mode: write operations are blocked") + else: + logger.info(" Mode: Read-Write") except Exception as e: logger.error(f"โŒ Failed to initialize OpenProject client: {e}") raise -# Dependency injection helper for tools +# Dependency injection helpers for tools 
def get_client(): """Get OpenProject client instance.""" return _client +def is_readonly() -> bool: + """Return True if the server is running in read-only mode (READ_ONLY_MODE=true).""" + return _client.readonly if _client else False + + # Import ALL tool modules (decorators auto-register tools) logger.info("Loading tool modules...") @@ -84,3 +95,12 @@ def get_client(): logger.warning(f"โš ๏ธ Some tool modules failed to import: {e}") raise + +def main(): + """CLI entry point for openproject-mcp package.""" + mcp.run() + + +if __name__ == "__main__": + main() + diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..fe2b81d --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Test package for openproject-mcp-server.""" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..990015e --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,22 @@ +""" +Shared pytest configuration and fixtures for all test modules. + +Sets dummy environment variables BEFORE any src.server import so +OpenProjectClient initialises without hitting a real API. +""" + +import os + +# Must be set before src.server is imported (happens when tool modules load) +os.environ.setdefault("OPENPROJECT_URL", "http://openproject.test") +os.environ.setdefault("OPENPROJECT_API_KEY", "test-api-key-00000000") +os.environ.setdefault("READ_ONLY_MODE", "false") + +import pytest +from unittest.mock import AsyncMock + + +@pytest.fixture +def mock_client(): + """Return a fresh AsyncMock that stands in for OpenProjectClient.""" + return AsyncMock() diff --git a/tests/e2e_test.py b/tests/e2e_test.py new file mode 100644 index 0000000..af7783f --- /dev/null +++ b/tests/e2e_test.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python3 +""" +End-to-End Test Suite for OpenProject MCP Server + +This test suite validates the complete functionality of the MCP server +by testing against a real OpenProject instance running in Docker. 
+""" + +import os +import sys +import time +import asyncio +import logging +from typing import Dict, List, Any +import requests +from datetime import datetime, timedelta + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") +logger = logging.getLogger(__name__) + + +class OpenProjectTestClient: + """Test client for OpenProject API""" + + def __init__(self, base_url: str, api_key: str): + self.base_url = base_url.rstrip("/") + self.api_key = api_key + self.headers = { + "Authorization": f"Basic {self._encode_api_key()}", + "Content-Type": "application/json", + "Accept": "application/json", + } + + def _encode_api_key(self) -> str: + """Encode API key for Basic Auth""" + import base64 + + credentials = f"apikey:{self.api_key}" + return base64.b64encode(credentials.encode()).decode() + + def test_connection(self) -> Dict: + """Test API connection""" + response = requests.get(f"{self.base_url}/api/v3", headers=self.headers) + response.raise_for_status() + return response.json() + + def create_project(self, name: str, description: str = "") -> Dict: + """Create a test project""" + data = {"name": name, "description": {"raw": description}, "public": True} + response = requests.post(f"{self.base_url}/api/v3/projects", headers=self.headers, json=data) + response.raise_for_status() + return response.json() + + def create_user(self, email: str, name: str, password: str) -> Dict: + """Create a test user""" + data = { + "login": email, + "email": email, + "firstName": name.split()[0], + "lastName": name.split()[-1] if len(name.split()) > 1 else "", + "password": password, + "status": "active", + } + response = requests.post(f"{self.base_url}/api/v3/users", headers=self.headers, json=data) + response.raise_for_status() + return response.json() + + def get_projects(self) -> List[Dict]: + """Get all projects""" + response = requests.get(f"{self.base_url}/api/v3/projects", headers=self.headers) + 
response.raise_for_status() + data = response.json() + return data.get("_embedded", {}).get("elements", []) + + def get_users(self) -> List[Dict]: + """Get all users""" + response = requests.get(f"{self.base_url}/api/v3/users", headers=self.headers) + response.raise_for_status() + data = response.json() + return data.get("_embedded", {}).get("elements", []) + + def get_types(self) -> List[Dict]: + """Get work package types""" + response = requests.get(f"{self.base_url}/api/v3/types", headers=self.headers) + response.raise_for_status() + data = response.json() + return data.get("_embedded", {}).get("elements", []) + + def get_priorities(self) -> List[Dict]: + """Get work package priorities""" + response = requests.get(f"{self.base_url}/api/v3/priorities", headers=self.headers) + response.raise_for_status() + data = response.json() + return data.get("_embedded", {}).get("elements", []) + + def get_statuses(self) -> List[Dict]: + """Get work package statuses""" + response = requests.get(f"{self.base_url}/api/v3/statuses", headers=self.headers) + response.raise_for_status() + data = response.json() + return data.get("_embedded", {}).get("elements", []) + + def cleanup_project(self, project_id: int): + """Delete a project""" + try: + requests.delete(f"{self.base_url}/api/v3/projects/{project_id}", headers=self.headers) + except Exception as e: + logger.warning(f"Failed to cleanup project {project_id}: {e}") + + def cleanup_user(self, user_id: int): + """Delete a user""" + try: + requests.delete(f"{self.base_url}/api/v3/users/{user_id}", headers=self.headers) + except Exception as e: + logger.warning(f"Failed to cleanup user {user_id}: {e}") + + +class MCPTestClient: + """Test client for MCP server""" + + def __init__(self, server_url: str): + self.server_url = server_url + + async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]: + """Call an MCP tool""" + # Import the MCP server module + sys.path.append("/app") + from openproject_mcp 
import OpenProjectMCPServer, OpenProjectClient + + # Create server instance + server = OpenProjectMCPServer() + + # Initialize client + base_url = os.getenv("OPENPROJECT_URL") + api_key = os.getenv("OPENPROJECT_API_KEY") + if base_url and api_key: + server.client = OpenProjectClient(base_url, api_key) + + # Call the tool + result = await server.call_tool(tool_name, arguments) + return {"content": result} + + +class E2ETestSuite: + """End-to-end test suite""" + + def __init__(self): + self.openproject_url = os.getenv("OPENPROJECT_URL", "http://localhost:8080") + self.api_key = os.getenv("OPENPROJECT_API_KEY", "test-api-key") + self.mcp_server_url = os.getenv("MCP_SERVER_URL", "http://localhost:8080") + + # For testing, we'll use the admin user credentials + self.admin_username = "admin" + self.admin_password = "admin123" + + self.op_client = OpenProjectTestClient(self.openproject_url, self.api_key) + self.mcp_client = MCPTestClient(self.mcp_server_url) + + # Test data + self.test_project = None + self.test_user = None + self.created_work_packages = [] + + async def setup_test_data(self): + """Set up test data""" + logger.info("Setting up test data...") + + # Wait for OpenProject to be ready + await self.wait_for_openproject() + + # Wait a bit more for full initialization + await asyncio.sleep(30) + + # Try to create test project + try: + self.test_project = self.op_client.create_project("E2E Test Project", "Test project for end-to-end testing") + logger.info(f"Created test project: {self.test_project['id']}") + except Exception as e: + logger.warning(f"Failed to create test project: {e}") + # Try to get existing projects + projects = self.op_client.get_projects() + if projects: + self.test_project = projects[0] + logger.info(f"Using existing project: {self.test_project['id']}") + else: + raise Exception("No projects available for testing") + + # Try to create test user + try: + self.test_user = self.op_client.create_user("test@example.com", "Test User", "test123") + 
logger.info(f"Created test user: {self.test_user['id']}") + except Exception as e: + logger.warning(f"Failed to create test user: {e}") + # Try to get existing users + users = self.op_client.get_users() + if users: + self.test_user = users[0] + logger.info(f"Using existing user: {self.test_user['id']}") + else: + raise Exception("No users available for testing") + + async def wait_for_openproject(self, timeout: int = 300): + """Wait for OpenProject to be ready""" + logger.info("Waiting for OpenProject to be ready...") + start_time = time.time() + + while time.time() - start_time < timeout: + try: + # Check if OpenProject is responding (use public endpoint) + response = requests.get(f"{self.openproject_url}/", timeout=10) + if response.status_code == 200: + logger.info("OpenProject is ready!") + return + except Exception as e: + logger.debug(f"OpenProject not ready yet: {e}") + await asyncio.sleep(10) + + raise Exception(f"OpenProject not ready after {timeout} seconds") + + async def test_connection(self): + """Test MCP server connection to OpenProject""" + logger.info("Testing MCP server connection...") + + result = await self.mcp_client.call_tool("test_connection", {}) + + assert "content" in result + assert len(result["content"]) > 0 + assert "API connection successful" in result["content"][0].text + + logger.info("โœ… Connection test passed") + + async def test_list_projects(self): + """Test listing projects""" + logger.info("Testing list_projects tool...") + + result = await self.mcp_client.call_tool("list_projects", {"active_only": True}) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "project(s)" in content + assert self.test_project["name"] in content + + logger.info("โœ… List projects test passed") + + async def test_list_users(self): + """Test listing users""" + logger.info("Testing list_users tool...") + + result = await self.mcp_client.call_tool("list_users", {"active_only": True}) + + 
assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "user(s)" in content + assert self.test_user["name"] in content + + logger.info("โœ… List users test passed") + + async def test_list_types(self): + """Test listing work package types""" + logger.info("Testing list_types tool...") + + result = await self.mcp_client.call_tool("list_types", {}) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "work package types" in content + + logger.info("โœ… List types test passed") + + async def test_list_priorities(self): + """Test listing priorities""" + logger.info("Testing list_priorities tool...") + + result = await self.mcp_client.call_tool("list_priorities", {}) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "priorities" in content + + logger.info("โœ… List priorities test passed") + + async def test_list_statuses(self): + """Test listing statuses""" + logger.info("Testing list_statuses tool...") + + result = await self.mcp_client.call_tool("list_statuses", {}) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "statuses" in content + + logger.info("โœ… List statuses test passed") + + async def test_create_work_package(self): + """Test creating a work package""" + logger.info("Testing create_work_package tool...") + + # Get types and priorities + types = self.op_client.get_types() + priorities = self.op_client.get_priorities() + + assert len(types) > 0, "No work package types available" + assert len(priorities) > 0, "No priorities available" + + # Create work package + result = await self.mcp_client.call_tool( + "create_work_package", + { + "project_id": self.test_project["id"], + "subject": "E2E Test Work Package", + "description": "This is a test work package created by the E2E test suite", + "type_id": 
types[0]["id"], + "priority_id": priorities[0]["id"], + "assignee_id": self.test_user["id"], + }, + ) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "Work package created successfully" in content + assert "E2E Test Work Package" in content + + # Extract work package ID from response + import re + + id_match = re.search(r"#(\d+)", content) + if id_match: + wp_id = int(id_match.group(1)) + self.created_work_packages.append(wp_id) + + logger.info("โœ… Create work package test passed") + + async def test_list_work_packages(self): + """Test listing work packages""" + logger.info("Testing list_work_packages tool...") + + result = await self.mcp_client.call_tool( + "list_work_packages", + {"project_id": self.test_project["id"], "status": "open"}, + ) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "work package(s)" in content + assert "E2E Test Work Package" in content + + logger.info("โœ… List work packages test passed") + + async def test_create_meeting(self): + """Test creating a meeting""" + logger.info("Testing create_meeting tool...") + + # Get types + types = self.op_client.get_types() + assert len(types) > 0, "No work package types available" + + # Create meeting + tomorrow = (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d") + + result = await self.mcp_client.call_tool( + "create_meeting", + { + "project_id": self.test_project["id"], + "meeting_title": "E2E Test Meeting", + "meeting_date": tomorrow, + "meeting_time": "10:00", + "duration_minutes": 60, + "attendees": [self.test_user["id"]], + "agenda": "Test agenda for E2E testing", + "meeting_type": "general", + "location": "Test Room", + }, + ) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "Meeting work package created successfully" in content + assert "E2E Test Meeting" in content + + logger.info("โœ… 
Create meeting test passed") + + async def test_list_meetings(self): + """Test listing meetings""" + logger.info("Testing list_meetings tool...") + + result = await self.mcp_client.call_tool( + "list_meetings", + {"project_id": self.test_project["id"], "status": "scheduled"}, + ) + + assert "content" in result + assert len(result["content"]) > 0 + content = result["content"][0].text + + assert "meeting(s)" in content + assert "E2E Test Meeting" in content + + logger.info("โœ… List meetings test passed") + + async def cleanup(self): + """Clean up test data""" + logger.info("Cleaning up test data...") + + # Clean up work packages (if any were created) + for wp_id in self.created_work_packages: + try: + requests.delete( + f"{self.openproject_url}/api/v3/work_packages/{wp_id}", + headers=self.op_client.headers, + ) + except Exception as e: + logger.warning(f"Failed to cleanup work package {wp_id}: {e}") + + # Clean up project + if self.test_project: + self.op_client.cleanup_project(self.test_project["id"]) + + # Clean up user + if self.test_user: + self.op_client.cleanup_user(self.test_user["id"]) + + logger.info("Cleanup completed") + + async def run_all_tests(self): + """Run all tests""" + logger.info("Starting E2E test suite...") + + try: + # Setup + await self.setup_test_data() + + # Run tests + await self.test_connection() + await self.test_list_projects() + await self.test_list_users() + await self.test_list_types() + await self.test_list_priorities() + await self.test_list_statuses() + await self.test_create_work_package() + await self.test_list_work_packages() + await self.test_create_meeting() + await self.test_list_meetings() + + logger.info("๐ŸŽ‰ All tests passed!") + + except Exception as e: + logger.error(f"โŒ Test failed: {e}") + raise + finally: + await self.cleanup() + + +async def main(): + """Main test runner""" + test_suite = E2ETestSuite() + await test_suite.run_all_tests() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/tests/setup_test_data.py b/tests/setup_test_data.py new file mode 100644 index 0000000..3bc0dea --- /dev/null +++ b/tests/setup_test_data.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Test Data Setup Script for OpenProject + +This script sets up test data in OpenProject for E2E testing. +It creates projects, users, and other necessary data. +""" + +import os +import time +import requests +import base64 +from typing import Dict + + +class OpenProjectSetup: + """Setup class for OpenProject test data""" + + def __init__(self, base_url: str, api_key: str): + self.base_url = base_url.rstrip("/") + self.api_key = api_key + self.headers = { + "Authorization": f"Basic {self._encode_api_key()}", + "Content-Type": "application/json", + "Accept": "application/json", + } + + def _encode_api_key(self) -> str: + """Encode API key for Basic Auth""" + credentials = f"apikey:{self.api_key}" + return base64.b64encode(credentials.encode()).decode() + + def wait_for_ready(self, timeout: int = 300): + """Wait for OpenProject to be ready""" + print("Waiting for OpenProject to be ready...") + start_time = time.time() + + while time.time() - start_time < timeout: + try: + response = requests.get(f"{self.base_url}/api/v3", headers=self.headers) + response.raise_for_status() + print("OpenProject is ready!") + return + except Exception as e: + print(f"OpenProject not ready yet: {e}") + time.sleep(10) + + raise Exception(f"OpenProject not ready after {timeout} seconds") + + def create_test_project(self) -> Dict: + """Create a test project""" + data = { + "name": "E2E Test Project", + "description": {"raw": "Test project for end-to-end testing"}, + "public": True, + } + response = requests.post(f"{self.base_url}/api/v3/projects", headers=self.headers, json=data) + response.raise_for_status() + return response.json() + + def create_test_user(self) -> Dict: + """Create a test user""" + data = { + "login": "test@example.com", + "email": "test@example.com", + "firstName": "Test", + 
"lastName": "User", + "password": "test123", + "status": "active", + } + response = requests.post(f"{self.base_url}/api/v3/users", headers=self.headers, json=data) + response.raise_for_status() + return response.json() + + def create_test_work_package(self, project_id: int, type_id: int) -> Dict: + """Create a test work package""" + # First get the form + form_data = { + "_links": { + "project": {"href": f"/api/v3/projects/{project_id}"}, + "type": {"href": f"/api/v3/types/{type_id}"}, + }, + "subject": "Test Work Package", + } + + form_response = requests.post( + f"{self.base_url}/api/v3/work_packages/form", + headers=self.headers, + json=form_data, + ) + form_response.raise_for_status() + form = form_response.json() + + # Create the work package + payload = form.get("payload", form_data) + payload["lockVersion"] = form.get("lockVersion", 0) + + response = requests.post(f"{self.base_url}/api/v3/work_packages", headers=self.headers, json=payload) + response.raise_for_status() + return response.json() + + def setup_test_data(self): + """Set up all test data""" + print("Setting up test data...") + + # Wait for OpenProject to be ready + self.wait_for_ready() + + # Create test project + project = self.create_test_project() + print(f"Created test project: {project['id']} - {project['name']}") + + # Create test user + user = self.create_test_user() + print(f"Created test user: {user['id']} - {user['name']}") + + # Get work package types + types_response = requests.get(f"{self.base_url}/api/v3/types", headers=self.headers) + types_response.raise_for_status() + types = types_response.json().get("_embedded", {}).get("elements", []) + + if types: + # Create a test work package + wp = self.create_test_work_package(project["id"], types[0]["id"]) + print(f"Created test work package: {wp['id']} - {wp['subject']}") + + print("Test data setup completed!") + + return {"project": project, "user": user, "types": types} + + +def main(): + """Main setup function""" + base_url = 
os.getenv("OPENPROJECT_URL", "http://localhost:8080") + api_key = os.getenv("OPENPROJECT_API_KEY", "test-api-key") + + setup = OpenProjectSetup(base_url, api_key) + setup.setup_test_data() + + +if __name__ == "__main__": + main() diff --git a/tests/simple_e2e_test.py b/tests/simple_e2e_test.py new file mode 100644 index 0000000..0886403 --- /dev/null +++ b/tests/simple_e2e_test.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 +""" +Simplified End-to-End Test Suite for OpenProject MCP Server + +This test suite validates basic functionality without requiring a fully configured OpenProject instance. +""" + +import os +import sys +import asyncio +import logging + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") +logger = logging.getLogger(__name__) + + +class SimpleE2ETestSuite: + """Simplified E2E test suite""" + + def __init__(self): + self.openproject_url = os.getenv("OPENPROJECT_URL", "http://localhost:8080") + self.api_key = os.getenv("OPENPROJECT_API_KEY", "test-api-key") + + async def test_mcp_server_initialization(self): + """Test that MCP server can be initialized""" + logger.info("Testing MCP server initialization...") + + try: + # Import the MCP server module + sys.path.append("/app") + import importlib.util + + spec = importlib.util.spec_from_file_location("openproject_mcp", "/app/openproject-mcp.py") + openproject_mcp = importlib.util.module_from_spec(spec) + spec.loader.exec_module(openproject_mcp) + OpenProjectMCPServer = openproject_mcp.OpenProjectMCPServer + + # Create server instance + server = OpenProjectMCPServer() + assert server.server is not None + assert server.client is None # Should be None initially + + logger.info("โœ… MCP server initialization test passed") + return True + + except Exception as e: + logger.error(f"โŒ MCP server initialization test failed: {e}") + return False + + async def test_openproject_client_initialization(self): + """Test that OpenProject client 
can be initialized""" + logger.info("Testing OpenProject client initialization...") + + try: + # Import the MCP server module + sys.path.append("/app") + import importlib.util + + spec = importlib.util.spec_from_file_location("openproject_mcp", "/app/openproject-mcp.py") + openproject_mcp = importlib.util.module_from_spec(spec) + spec.loader.exec_module(openproject_mcp) + OpenProjectClient = openproject_mcp.OpenProjectClient + + # Create client instance + client = OpenProjectClient(self.openproject_url, self.api_key) + assert client.base_url == self.openproject_url + assert client.api_key == self.api_key + + logger.info("โœ… OpenProject client initialization test passed") + return True + + except Exception as e: + logger.error(f"โŒ OpenProject client initialization test failed: {e}") + return False + + async def test_tool_schemas(self): + """Test that tool schemas are properly defined""" + logger.info("Testing tool schemas...") + + try: + # Import the MCP server module + sys.path.append("/app") + import importlib.util + + spec = importlib.util.spec_from_file_location("openproject_mcp", "/app/openproject-mcp.py") + openproject_mcp = importlib.util.module_from_spec(spec) + spec.loader.exec_module(openproject_mcp) + OpenProjectMCPServer = openproject_mcp.OpenProjectMCPServer + + # Create server instance + server = OpenProjectMCPServer() + + # The tools are registered via decorators, so we can't easily access them directly + # Instead, let's test that the server was created successfully + assert server.server is not None + assert hasattr(server.server, "list_tools") + + logger.info("โœ… Tool schemas test passed - server has list_tools handler") + return True + + except Exception as e: + logger.error(f"โŒ Tool schemas test failed: {e}") + return False + + async def test_tool_call_without_client(self): + """Test that server can be created without client""" + logger.info("Testing server creation without client...") + + try: + # Import the MCP server module + 
sys.path.append("/app") + import importlib.util + + spec = importlib.util.spec_from_file_location("openproject_mcp", "/app/openproject-mcp.py") + openproject_mcp = importlib.util.module_from_spec(spec) + spec.loader.exec_module(openproject_mcp) + OpenProjectMCPServer = openproject_mcp.OpenProjectMCPServer + + # Create server instance without client + server = OpenProjectMCPServer() + + # Check that server was created successfully + assert server.server is not None + assert server.client is None # Should be None initially + + logger.info("โœ… Server creation without client test passed") + return True + + except Exception as e: + logger.error(f"โŒ Server creation without client test failed: {e}") + return False + + async def test_unknown_tool_call(self): + """Test server initialization with client""" + logger.info("Testing server initialization with client...") + + try: + # Import the MCP server module + sys.path.append("/app") + import importlib.util + + spec = importlib.util.spec_from_file_location("openproject_mcp", "/app/openproject-mcp.py") + openproject_mcp = importlib.util.module_from_spec(spec) + spec.loader.exec_module(openproject_mcp) + OpenProjectMCPServer = openproject_mcp.OpenProjectMCPServer + OpenProjectClient = openproject_mcp.OpenProjectClient + + # Create server instance + server = OpenProjectMCPServer() + + # Initialize client + server.client = OpenProjectClient(self.openproject_url, self.api_key) + + # Check that client was set + assert server.client is not None + assert server.client.base_url == self.openproject_url + + logger.info("โœ… Server initialization with client test passed") + return True + + except Exception as e: + logger.error(f"โŒ Server initialization with client test failed: {e}") + return False + + async def run_all_tests(self): + """Run all tests""" + logger.info("Starting simplified E2E test suite...") + + tests = [ + self.test_mcp_server_initialization, + self.test_openproject_client_initialization, + self.test_tool_schemas, 
+ self.test_tool_call_without_client, + self.test_unknown_tool_call, + ] + + passed = 0 + failed = 0 + + for test in tests: + try: + result = await test() + if result: + passed += 1 + else: + failed += 1 + except Exception as e: + logger.error(f"Test {test.__name__} failed with exception: {e}") + failed += 1 + + logger.info(f"Test results: {passed} passed, {failed} failed") + + if failed == 0: + logger.info("๐ŸŽ‰ All tests passed!") + return True + else: + logger.error(f"โŒ {failed} tests failed") + return False + + +async def main(): + """Main test runner""" + test_suite = SimpleE2ETestSuite() + success = await test_suite.run_all_tests() + + if not success: + sys.exit(1) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/tests/test_basic.py b/tests/test_basic.py new file mode 100644 index 0000000..06040c5 --- /dev/null +++ b/tests/test_basic.py @@ -0,0 +1,23 @@ +"""Basic smoke tests for openproject-mcp-server package.""" + + +def test_client_module_import(): + """Verify src.client module can be imported.""" + from src import client + + assert client is not None + + +def test_version_defined(): + """Verify version is defined in src.client.""" + from src.client import __version__ + + assert __version__ is not None + assert isinstance(__version__, str) + + +def test_cli_entry_point(): + """Verify CLI entry point function exists in src.server.""" + from src.server import main + + assert callable(main) diff --git a/tests/test_client.py b/tests/test_client.py new file mode 100644 index 0000000..1ce42bc --- /dev/null +++ b/tests/test_client.py @@ -0,0 +1,33 @@ +"""Tests for OpenProjectClient.""" + + +def test_client_import(): + """Verify OpenProjectClient can be imported.""" + from src.client import OpenProjectClient + + assert OpenProjectClient is not None + + +def test_client_initialization(): + """Test that OpenProjectClient can be instantiated.""" + from src.client import OpenProjectClient + + client = OpenProjectClient("https://test.example.com", 
"test-key") + assert client.base_url == "https://test.example.com" + assert client.api_key == "test-key" + + +def test_client_initialization_strips_trailing_slash(): + """Test that trailing slash is removed from base_url.""" + from src.client import OpenProjectClient + + client = OpenProjectClient("https://test.example.com/", "test-key") + assert client.base_url == "https://test.example.com" + + +def test_client_with_proxy(): + """Test that proxy configuration is accepted.""" + from src.client import OpenProjectClient + + client = OpenProjectClient("https://test.example.com", "test-key", proxy="http://proxy:8080") + assert client.proxy == "http://proxy:8080" diff --git a/tests/test_connection.py b/tests/test_connection.py new file mode 100644 index 0000000..f0c510f --- /dev/null +++ b/tests/test_connection.py @@ -0,0 +1,60 @@ +"""Tests for src/tools/connection.py โ€” test_connection and check_permissions.""" + +from unittest.mock import patch + +from src.tools.connection import check_permissions, test_connection + + +async def test_connection_success(mock_client): + mock_client.test_connection.return_value = { + "instanceVersion": "13.4.0", + "coreVersion": "13.4.0", + } + with patch("src.tools.connection.get_client", return_value=mock_client): + result = await test_connection() + + assert "โœ…" in result + assert "13.4.0" in result + + +async def test_connection_failure(mock_client): + mock_client.test_connection.side_effect = Exception("Connection refused") + with patch("src.tools.connection.get_client", return_value=mock_client): + result = await test_connection() + + assert "โŒ" in result + assert "Connection refused" in result + + +async def test_check_permissions_success(mock_client): + mock_client.check_permissions.return_value = { + "name": "Alice", + "email": "alice@example.com", + "login": "alice", + "status": "active", + "admin": True, + } + with patch("src.tools.connection.get_client", return_value=mock_client): + result = await check_permissions() + + 
assert "โœ…" in result + assert "Alice" in result + assert "alice@example.com" in result + assert "Yes" in result # admin + + +async def test_check_permissions_empty_result(mock_client): + mock_client.check_permissions.return_value = {} + with patch("src.tools.connection.get_client", return_value=mock_client): + result = await check_permissions() + + assert "โŒ" in result + + +async def test_check_permissions_failure(mock_client): + mock_client.check_permissions.side_effect = Exception("Unauthorized") + with patch("src.tools.connection.get_client", return_value=mock_client): + result = await check_permissions() + + assert "โŒ" in result + assert "Unauthorized" in result diff --git a/tests/test_hierarchy.py b/tests/test_hierarchy.py new file mode 100644 index 0000000..1eb6287 --- /dev/null +++ b/tests/test_hierarchy.py @@ -0,0 +1,113 @@ +"""Tests for src/tools/hierarchy.py.""" + +from unittest.mock import patch + +from src.tools.hierarchy import ( + list_work_package_children, + remove_work_package_parent, + set_work_package_parent, +) + +_CHILD_WP = { + "id": 50, + "subject": "Sub-task", + "_embedded": { + "type": {"name": "Task"}, + "status": {"name": "New"}, + }, +} + + +# --------------------------------------------------------------------------- +# set_work_package_parent +# --------------------------------------------------------------------------- + +async def test_set_parent_success(mock_client): + mock_client.update_work_package.return_value = { + "id": 50, + "subject": "Sub-task", + "_embedded": { + "parent": {"subject": "Main task"}, + }, + } + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await set_work_package_parent(child_id=50, parent_id=10) + + assert "โœ…" in result + assert "50" in result + assert "10" in result + + +async def test_set_parent_failure(mock_client): + mock_client.update_work_package.side_effect = Exception("Circular dependency") + with patch("src.tools.hierarchy.get_client", 
return_value=mock_client): + result = await set_work_package_parent(child_id=50, parent_id=10) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# remove_work_package_parent +# --------------------------------------------------------------------------- + +async def test_remove_parent_success(mock_client): + mock_client.update_work_package.return_value = {"id": 50, "subject": "Sub-task"} + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await remove_work_package_parent(work_package_id=50) + + assert "โœ…" in result + assert "50" in result + + +async def test_remove_parent_failure(mock_client): + mock_client.update_work_package.side_effect = Exception("Not found") + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await remove_work_package_parent(work_package_id=99) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# list_work_package_children +# --------------------------------------------------------------------------- + +async def test_list_children_success(mock_client): + mock_client.get_work_package_children.return_value = { + "_embedded": {"elements": [_CHILD_WP]}, + "total": 1, + } + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await list_work_package_children(work_package_id=10) + + assert "Sub-task" in result + + +async def test_list_children_empty(mock_client): + mock_client.get_work_package_children.return_value = { + "_embedded": {"elements": []}, + "total": 0, + } + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await list_work_package_children(work_package_id=10) + + assert "no children" in result.lower() + + +async def test_list_children_pagination_hint(mock_client): + children = [dict(_CHILD_WP, id=i, subject=f"Task {i}") for i in range(1, 6)] + 
mock_client.get_work_package_children.return_value = { + "_embedded": {"elements": children}, + "total": 50, + } + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await list_work_package_children(work_package_id=10, page_size=5) + + assert "Pagination" in result or "offset" in result + + +async def test_list_children_failure(mock_client): + mock_client.get_work_package_children.side_effect = Exception("Not found") + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await list_work_package_children(work_package_id=99) + + assert "โŒ" in result diff --git a/tests/test_memberships.py b/tests/test_memberships.py new file mode 100644 index 0000000..ce87f5b --- /dev/null +++ b/tests/test_memberships.py @@ -0,0 +1,201 @@ +"""Tests for src/tools/memberships.py.""" + +import pytest +from pydantic import ValidationError +from unittest.mock import patch + +from src.tools.memberships import ( + CreateMembershipInput, + UpdateMembershipInput, + create_membership, + delete_membership, + get_membership, + list_memberships, + update_membership, +) + +_MEMBER = { + "_links": { + "principal": {"title": "Bob", "href": "/api/v3/users/7"}, + "project": {"title": "Alpha"}, + "roles": [{"title": "Developer"}], + } +} + +_MEMBER_DETAIL = { + "id": 11, + "createdAt": "2025-01-01T00:00:00Z", + "updatedAt": "2025-01-02T00:00:00Z", + "_links": { + "project": {"title": "Alpha"}, + "principal": {"title": "Bob"}, + "roles": [{"title": "Developer"}], + }, +} + + +# --------------------------------------------------------------------------- +# Input model validation +# --------------------------------------------------------------------------- + +def test_create_membership_input_valid(): + inp = CreateMembershipInput(project_id=1, user_id=7, role_ids=[2]) + assert inp.project_id == 1 + assert inp.user_id == 7 + + +def test_create_membership_input_invalid_project_id(): + with pytest.raises(ValidationError): + 
CreateMembershipInput(project_id=0, user_id=7, role_ids=[2]) + + +def test_update_membership_input_valid(): + inp = UpdateMembershipInput(membership_id=11, role_ids=[3]) + assert inp.membership_id == 11 + + +def test_update_membership_input_invalid_id(): + with pytest.raises(ValidationError): + UpdateMembershipInput(membership_id=0, role_ids=[1]) + + +# --------------------------------------------------------------------------- +# list_memberships +# --------------------------------------------------------------------------- + +async def test_list_memberships_success(mock_client): + mock_client.get_memberships.return_value = {"_embedded": {"elements": [_MEMBER]}} + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await list_memberships() + + assert "Bob" in result + assert "Developer" in result + + +async def test_list_memberships_filtered_by_project(mock_client): + mock_client.get_memberships.return_value = {"_embedded": {"elements": [_MEMBER]}} + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await list_memberships(project_id=1) + + assert "Bob" in result + # project name should not appear when filtering by single project + assert "Alpha" not in result + + +async def test_list_memberships_empty(mock_client): + mock_client.get_memberships.return_value = {"_embedded": {"elements": []}} + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await list_memberships() + + assert "No memberships found" in result + + +# --------------------------------------------------------------------------- +# get_membership +# --------------------------------------------------------------------------- + +async def test_get_membership_success(mock_client): + mock_client.get_membership.return_value = _MEMBER_DETAIL + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await get_membership(membership_id=11) + + assert "โœ…" in result + assert 
"Alpha" in result + assert "Bob" in result + assert "Developer" in result + + +async def test_get_membership_failure(mock_client): + mock_client.get_membership.side_effect = Exception("Not found") + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await get_membership(membership_id=99) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# create_membership +# --------------------------------------------------------------------------- + +async def test_create_membership_success(mock_client): + mock_client.create_membership.return_value = { + "id": 20, + "_embedded": { + "project": {"name": "Alpha"}, + "principal": {"name": "Bob"}, + "roles": [{"name": "Developer"}], + }, + } + with patch("src.tools.memberships.get_client", return_value=mock_client): + inp = CreateMembershipInput(project_id=1, user_id=7, role_id=2) + result = await create_membership(inp) + + assert "โœ…" in result + assert "20" in result + + +async def test_create_membership_missing_principal(mock_client): + with patch("src.tools.memberships.get_client", return_value=mock_client): + # No user_id or group_id + inp = CreateMembershipInput(project_id=1, role_id=2) + result = await create_membership(inp) + + assert "โŒ" in result + mock_client.create_membership.assert_not_called() + + +async def test_create_membership_missing_role(mock_client): + with patch("src.tools.memberships.get_client", return_value=mock_client): + # No role_ids or role_id + inp = CreateMembershipInput(project_id=1, user_id=7) + result = await create_membership(inp) + + assert "โŒ" in result + mock_client.create_membership.assert_not_called() + + +# --------------------------------------------------------------------------- +# update_membership +# --------------------------------------------------------------------------- + +async def test_update_membership_success(mock_client): + mock_client.update_membership.return_value = { + "id": 
11, + "_embedded": {"roles": [{"name": "Manager"}]}, + } + with patch("src.tools.memberships.get_client", return_value=mock_client): + inp = UpdateMembershipInput(membership_id=11, role_ids=[4]) + result = await update_membership(inp) + + assert "โœ…" in result + assert "11" in result + + +async def test_update_membership_no_fields(mock_client): + with patch("src.tools.memberships.get_client", return_value=mock_client): + inp = UpdateMembershipInput(membership_id=11) + result = await update_membership(inp) + + assert "โŒ" in result + mock_client.update_membership.assert_not_called() + + +# --------------------------------------------------------------------------- +# delete_membership +# --------------------------------------------------------------------------- + +async def test_delete_membership_success(mock_client): + mock_client.delete_membership.return_value = True + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await delete_membership(membership_id=11) + + assert "โœ…" in result + assert "11" in result + + +async def test_delete_membership_failure(mock_client): + mock_client.delete_membership.side_effect = Exception("Permission denied") + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await delete_membership(membership_id=11) + + assert "โŒ" in result diff --git a/tests/test_news.py b/tests/test_news.py new file mode 100644 index 0000000..e225300 --- /dev/null +++ b/tests/test_news.py @@ -0,0 +1,254 @@ +"""Tests for src/tools/news.py โ€” proper pytest rewrite of test_news_tools.py.""" + +import pytest +from pydantic import ValidationError +from unittest.mock import patch + +from src.tools.news import ( + CreateNewsInput, + UpdateNewsInput, + create_news, + delete_news, + get_news, + list_news, + update_news, +) +from src.utils.formatting import format_news_detail, format_news_list + +_NEWS_ITEM = { + "id": 1, + "title": "Sprint Review", + "summary": "Completed user stories", + 
"createdAt": "2025-03-15T10:00:00.000Z", + "_links": { + "project": {"title": "Alpha"}, + "author": {"title": "Alice"}, + }, +} + +_NEWS_DETAIL = { + "id": 5, + "title": "Release Notes", + "summary": "Version 1.0 shipped", + "description": {"raw": "# v1.0\n\nAll features complete."}, + "createdAt": "2025-03-20T09:00:00.000Z", + "_links": { + "self": {"href": "/api/v3/news/5"}, + "project": {"href": "/api/v3/projects/1", "title": "Alpha"}, + "author": {"href": "/api/v3/users/2", "title": "Alice"}, + }, +} + + +# --------------------------------------------------------------------------- +# Input model validation +# --------------------------------------------------------------------------- + +def test_create_news_input_valid(): + inp = CreateNewsInput( + project_id=1, title="Test News", + summary="Short summary", description="# Content" + ) + assert inp.project_id == 1 + assert inp.title == "Test News" + + +def test_create_news_input_missing_summary(): + with pytest.raises(ValidationError): + CreateNewsInput(project_id=1, title="Test", description="Content") + + +def test_create_news_input_title_too_long(): + with pytest.raises(ValidationError): + CreateNewsInput( + project_id=1, title="A" * 256, + summary="S", description="D" + ) + + +def test_create_news_input_invalid_project_id(): + with pytest.raises(ValidationError): + CreateNewsInput(project_id=0, title="T", summary="S", description="D") + + +def test_update_news_input_valid_partial(): + inp = UpdateNewsInput(news_id=5, title="New Title") + assert inp.news_id == 5 + assert inp.summary is None + + +def test_update_news_input_description_only(): + inp = UpdateNewsInput(news_id=10, description="New desc") + assert inp.title is None + assert inp.description == "New desc" + + +# --------------------------------------------------------------------------- +# Formatting utilities +# --------------------------------------------------------------------------- + +def test_format_news_list_empty(): + result = 
format_news_list([]) + assert "No news entries found" in result + + +def test_format_news_list_single_item(): + result = format_news_list([_NEWS_ITEM]) + assert "Sprint Review" in result + assert "Alpha" in result + assert "Alice" in result + assert "2025-03-15" in result + + +def test_format_news_list_multiple_items(): + items = [ + dict(_NEWS_ITEM, id=1, title="News 1"), + dict(_NEWS_ITEM, id=2, title="News 2"), + ] + result = format_news_list(items) + assert "2 items" in result or "News 1" in result + assert "News 2" in result + + +def test_format_news_list_long_summary_truncated(): + item = dict(_NEWS_ITEM, summary="X" * 200) + result = format_news_list([item]) + assert "..." in result + + +def test_format_news_detail(): + result = format_news_detail(_NEWS_DETAIL) + assert "Release Notes" in result + assert "Alpha" in result + assert "Alice" in result + assert "v1.0" in result + + +# --------------------------------------------------------------------------- +# list_news +# --------------------------------------------------------------------------- + +async def test_list_news_success(mock_client): + mock_client.get_news.return_value = { + "_embedded": {"elements": [_NEWS_ITEM]}, + "total": 1, + } + with patch("src.tools.news.get_client", return_value=mock_client): + result = await list_news(project_id=1) + + assert "Sprint Review" in result + + +async def test_list_news_empty(mock_client): + mock_client.get_news.return_value = {"_embedded": {"elements": []}, "total": 0} + with patch("src.tools.news.get_client", return_value=mock_client): + result = await list_news() + + assert "No news entries found" in result + + +async def test_list_news_failure(mock_client): + mock_client.get_news.side_effect = Exception("API Error") + with patch("src.tools.news.get_client", return_value=mock_client): + result = await list_news() + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# create_news +# 
--------------------------------------------------------------------------- + +async def test_create_news_success(mock_client): + mock_client.create_news.return_value = { + "id": 10, "title": "Sprint Review", + "summary": "Completed stories", "description": {"raw": "# Done"}, + } + with patch("src.tools.news.get_client", return_value=mock_client): + inp = CreateNewsInput( + project_id=1, title="Sprint Review", + summary="Completed stories", description="# Done" + ) + result = await create_news(inp) + + assert "โœ…" in result + assert "10" in result + + +async def test_create_news_failure(mock_client): + mock_client.create_news.side_effect = Exception("Forbidden") + with patch("src.tools.news.get_client", return_value=mock_client): + inp = CreateNewsInput( + project_id=1, title="T", summary="S", description="D" + ) + result = await create_news(inp) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# get_news +# --------------------------------------------------------------------------- + +async def test_get_news_success(mock_client): + mock_client.get_news_item.return_value = _NEWS_DETAIL + with patch("src.tools.news.get_client", return_value=mock_client): + result = await get_news(news_id=5) + + assert "Release Notes" in result + assert "Alpha" in result + + +async def test_get_news_failure(mock_client): + mock_client.get_news_item.side_effect = Exception("Not found") + with patch("src.tools.news.get_client", return_value=mock_client): + result = await get_news(news_id=99) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# update_news +# --------------------------------------------------------------------------- + +async def test_update_news_success(mock_client): + mock_client.update_news.return_value = { + "id": 5, "title": "Updated Title", "summary": "New summary", + } + with patch("src.tools.news.get_client", return_value=mock_client): + inp 
= UpdateNewsInput(news_id=5, title="Updated Title") + result = await update_news(inp) + + assert "โœ…" in result + assert "5" in result + + +async def test_update_news_failure(mock_client): + mock_client.update_news.side_effect = Exception("Permission denied") + with patch("src.tools.news.get_client", return_value=mock_client): + inp = UpdateNewsInput(news_id=5, title="Updated") + result = await update_news(inp) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# delete_news +# --------------------------------------------------------------------------- + +async def test_delete_news_success(mock_client): + mock_client.delete_news.return_value = True + with patch("src.tools.news.get_client", return_value=mock_client): + result = await delete_news(news_id=5) + + assert "โœ…" in result + assert "5" in result + assert "deleted" in result.lower() + + +async def test_delete_news_failure(mock_client): + mock_client.delete_news.side_effect = Exception("Not found") + with patch("src.tools.news.get_client", return_value=mock_client): + result = await delete_news(news_id=99) + + assert "โŒ" in result diff --git a/tests/test_projects.py b/tests/test_projects.py new file mode 100644 index 0000000..2233de9 --- /dev/null +++ b/tests/test_projects.py @@ -0,0 +1,171 @@ +"""Tests for src/tools/projects.py.""" + +import pytest +from pydantic import ValidationError +from unittest.mock import patch + +from src.tools.projects import ( + CreateProjectInput, + UpdateProjectInput, + create_project, + delete_project, + get_project, + list_projects, + update_project, +) + + +# --------------------------------------------------------------------------- +# Input model validation +# --------------------------------------------------------------------------- + +def test_create_project_input_valid(): + inp = CreateProjectInput(name="My Project", identifier="my-project") + assert inp.name == "My Project" + assert inp.identifier == 
"my-project" + + +def test_create_project_input_missing_name(): + with pytest.raises(ValidationError): + CreateProjectInput(identifier="no-name") + + +def test_update_project_input_valid(): + inp = UpdateProjectInput(project_id=5, name="Renamed") + assert inp.project_id == 5 + assert inp.name == "Renamed" + + +def test_update_project_input_invalid_id(): + with pytest.raises(ValidationError): + UpdateProjectInput(project_id=0, name="Bad") + + +# --------------------------------------------------------------------------- +# list_projects +# --------------------------------------------------------------------------- + +async def test_list_projects_returns_projects(mock_client): + mock_client.get_projects.return_value = { + "_embedded": { + "elements": [ + {"id": 1, "name": "Alpha", "active": True, "identifier": "alpha", + "description": {"raw": ""}, "_links": {}}, + ] + } + } + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await list_projects(active_only=True) + + assert "Alpha" in result + + +async def test_list_projects_empty(mock_client): + mock_client.get_projects.return_value = {"_embedded": {"elements": []}} + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await list_projects() + + assert "No projects" in result or result.strip() != "" + + +async def test_list_projects_failure(mock_client): + mock_client.get_projects.side_effect = Exception("Timeout") + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await list_projects() + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# get_project +# --------------------------------------------------------------------------- + +async def test_get_project_success(mock_client): + mock_client.get_project.return_value = { + "id": 3, + "name": "Beta", + "identifier": "beta", + "active": True, + "description": {"raw": "Beta project"}, + } + with 
patch("src.tools.projects.get_client", return_value=mock_client): + result = await get_project(project_id=3) + + assert "Beta" in result + assert "โœ…" in result + + +async def test_get_project_failure(mock_client): + mock_client.get_project.side_effect = Exception("Not found") + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await get_project(project_id=99) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# create_project +# --------------------------------------------------------------------------- + +async def test_create_project_success(mock_client): + mock_client.create_project.return_value = { + "id": 10, + "name": "New Project", + "identifier": "new-project", + "active": True, + } + with patch("src.tools.projects.get_client", return_value=mock_client): + inp = CreateProjectInput(name="New Project", identifier="new-project") + result = await create_project(inp) + + assert "โœ…" in result + assert "New Project" in result + + +async def test_create_project_failure(mock_client): + mock_client.create_project.side_effect = Exception("Identifier taken") + with patch("src.tools.projects.get_client", return_value=mock_client): + inp = CreateProjectInput(name="Dup", identifier="dup") + result = await create_project(inp) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# update_project +# --------------------------------------------------------------------------- + +async def test_update_project_success(mock_client): + mock_client.update_project.return_value = { + "id": 5, + "name": "Renamed", + "identifier": "renamed", + "active": True, + } + with patch("src.tools.projects.get_client", return_value=mock_client): + inp = UpdateProjectInput(project_id=5, name="Renamed") + result = await update_project(inp) + + assert "โœ…" in result + assert "Renamed" in result + + +# 
--------------------------------------------------------------------------- +# delete_project +# --------------------------------------------------------------------------- + +async def test_delete_project_success(mock_client): + mock_client.delete_project.return_value = True + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await delete_project(project_id=5) + + assert "โœ…" in result + assert "5" in result + + +async def test_delete_project_failure(mock_client): + mock_client.delete_project.side_effect = Exception("Permission denied") + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await delete_project(project_id=5) + + assert "โŒ" in result diff --git a/tests/test_readonly_mode.py b/tests/test_readonly_mode.py new file mode 100644 index 0000000..2ba72f4 --- /dev/null +++ b/tests/test_readonly_mode.py @@ -0,0 +1,318 @@ +""" +Tests for READ_ONLY_MODE feature. + +Three layers: + Layer 1 โ€” OpenProjectClient._request unit tests (no HTTP mock needed; + the guard raises before any network call is attempted) + Layer 2 โ€” Tool-level tests: write tools return โŒ when the client raises + the readonly exception; read tools are unaffected + Layer 3 โ€” server.py integration: READ_ONLY_MODE env var is wired correctly +""" + +import os +import pytest +from unittest.mock import AsyncMock, patch + +from src.client import OpenProjectClient + +# Shared exception that simulates what _request raises in read-only mode +_READONLY_EXC = Exception( + "Read-only mode is enabled: POST operations are not permitted. " + "Set READ_ONLY_MODE=false to allow write operations." 
+) + + +# --------------------------------------------------------------------------- +# Layer 1: OpenProjectClient unit tests +# --------------------------------------------------------------------------- + +def test_client_readonly_false_by_default(): + client = OpenProjectClient(base_url="http://test.local", api_key="key") + assert client.readonly is False + + +def test_client_readonly_true_when_set(): + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + assert client.readonly is True + + +async def test_request_blocks_post_in_readonly(): + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + with pytest.raises(Exception, match="Read-only mode"): + await client._request("POST", "/projects", data={"name": "X"}) + + +async def test_request_blocks_patch_in_readonly(): + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + with pytest.raises(Exception, match="Read-only mode"): + await client._request("PATCH", "/projects/1", data={}) + + +async def test_request_blocks_put_in_readonly(): + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + with pytest.raises(Exception, match="Read-only mode"): + await client._request("PUT", "/work_packages/5", data={}) + + +async def test_request_blocks_delete_in_readonly(): + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + with pytest.raises(Exception, match="Read-only mode"): + await client._request("DELETE", "/projects/1") + + +async def test_request_does_not_block_get_in_readonly(): + """GET is allowed; the guard must NOT raise for read-only GET requests.""" + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=True) + # The request will fail with a network error (no real server), but NOT + # with the read-only guard โ€” that's what we assert. 
+ with pytest.raises(Exception) as exc_info: + await client._request("GET", "/projects") + assert "Read-only mode" not in str(exc_info.value) + + +async def test_request_not_blocked_when_not_readonly(): + """In read-write mode, the guard never fires even for POST/DELETE.""" + client = OpenProjectClient(base_url="http://test.local", api_key="key", readonly=False) + with pytest.raises(Exception) as exc_info: + await client._request("POST", "/projects", data={"name": "X"}) + assert "Read-only mode" not in str(exc_info.value) + + +def test_write_methods_constant_covers_all_mutating_verbs(): + from src.client import _WRITE_METHODS + assert "POST" in _WRITE_METHODS + assert "PATCH" in _WRITE_METHODS + assert "PUT" in _WRITE_METHODS + assert "DELETE" in _WRITE_METHODS + assert "GET" not in _WRITE_METHODS + assert "HEAD" not in _WRITE_METHODS + + +# --------------------------------------------------------------------------- +# Layer 2: Tool-level tests +# --------------------------------------------------------------------------- + +# --- projects --------------------------------------------------------------- + +async def test_create_project_blocked(mock_client): + from src.tools.projects import create_project, CreateProjectInput + mock_client.create_project.side_effect = _READONLY_EXC + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await create_project(CreateProjectInput(name="X", identifier="x")) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_update_project_blocked(mock_client): + from src.tools.projects import update_project, UpdateProjectInput + mock_client.update_project.side_effect = _READONLY_EXC + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await update_project(UpdateProjectInput(project_id=1, name="Y")) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_project_blocked(mock_client): + from src.tools.projects import 
delete_project + mock_client.delete_project.side_effect = _READONLY_EXC + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await delete_project(project_id=1) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- work_packages ---------------------------------------------------------- + +async def test_create_work_package_blocked(mock_client): + from src.tools.work_packages import create_work_package, CreateWorkPackageInput + mock_client.create_work_package.side_effect = _READONLY_EXC + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await create_work_package( + CreateWorkPackageInput(project_id=1, subject="T", type_id=1) + ) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_work_package_blocked(mock_client): + from src.tools.work_packages import delete_work_package + mock_client.delete_work_package.side_effect = _READONLY_EXC + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await delete_work_package(work_package_id=42) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- memberships ------------------------------------------------------------ + +async def test_create_membership_blocked(mock_client): + from src.tools.memberships import create_membership, CreateMembershipInput + mock_client.create_membership.side_effect = _READONLY_EXC + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await create_membership( + CreateMembershipInput(project_id=1, user_id=2, role_id=3) + ) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_membership_blocked(mock_client): + from src.tools.memberships import delete_membership + mock_client.delete_membership.side_effect = _READONLY_EXC + with patch("src.tools.memberships.get_client", return_value=mock_client): + result = await delete_membership(membership_id=5) + assert "โŒ" in 
result + assert "Read-only mode" in result + + +# --- hierarchy -------------------------------------------------------------- + +async def test_set_work_package_parent_blocked(mock_client): + from src.tools.hierarchy import set_work_package_parent + mock_client.update_work_package.side_effect = _READONLY_EXC + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await set_work_package_parent(child_id=10, parent_id=5) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_remove_work_package_parent_blocked(mock_client): + from src.tools.hierarchy import remove_work_package_parent + mock_client.update_work_package.side_effect = _READONLY_EXC + with patch("src.tools.hierarchy.get_client", return_value=mock_client): + result = await remove_work_package_parent(work_package_id=10) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- relations -------------------------------------------------------------- + +async def test_create_relation_blocked(mock_client): + from src.tools.relations import create_work_package_relation, CreateRelationInput + mock_client.create_work_package_relation.side_effect = _READONLY_EXC + with patch("src.tools.relations.get_client", return_value=mock_client): + result = await create_work_package_relation( + CreateRelationInput(from_id=1, to_id=2, type="follows") + ) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_relation_blocked(mock_client): + from src.tools.relations import delete_work_package_relation + mock_client.delete_work_package_relation.side_effect = _READONLY_EXC + with patch("src.tools.relations.get_client", return_value=mock_client): + result = await delete_work_package_relation(relation_id=7) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- time_entries ----------------------------------------------------------- + +async def test_create_time_entry_blocked(mock_client): + from 
src.tools.time_entries import create_time_entry, CreateTimeEntryInput + mock_client.create_time_entry.side_effect = _READONLY_EXC + with patch("src.tools.time_entries.get_client", return_value=mock_client): + result = await create_time_entry( + CreateTimeEntryInput( + work_package_id=1, hours=2.0, spent_on="2026-01-01", activity_id=3 + ) + ) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_time_entry_blocked(mock_client): + from src.tools.time_entries import delete_time_entry + mock_client.delete_time_entry.side_effect = _READONLY_EXC + with patch("src.tools.time_entries.get_client", return_value=mock_client): + result = await delete_time_entry(time_entry_id=3) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- versions --------------------------------------------------------------- + +async def test_create_version_blocked(mock_client): + from src.tools.versions import create_version, CreateVersionInput + mock_client.create_version.side_effect = _READONLY_EXC + with patch("src.tools.versions.get_client", return_value=mock_client): + result = await create_version(CreateVersionInput(project_id=1, name="v1.0")) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --- news ------------------------------------------------------------------- + +async def test_create_news_blocked(mock_client): + from src.tools.news import create_news, CreateNewsInput + mock_client.create_news.side_effect = _READONLY_EXC + with patch("src.tools.news.get_client", return_value=mock_client): + result = await create_news( + CreateNewsInput(project_id=1, title="T", summary="S", description="D") + ) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_update_news_blocked(mock_client): + from src.tools.news import update_news, UpdateNewsInput + mock_client.update_news.side_effect = _READONLY_EXC + with patch("src.tools.news.get_client", return_value=mock_client): + result = await 
update_news(UpdateNewsInput(news_id=1, title="New")) + assert "โŒ" in result + assert "Read-only mode" in result + + +async def test_delete_news_blocked(mock_client): + from src.tools.news import delete_news + mock_client.delete_news.side_effect = _READONLY_EXC + with patch("src.tools.news.get_client", return_value=mock_client): + result = await delete_news(news_id=1) + assert "โŒ" in result + assert "Read-only mode" in result + + +# --------------------------------------------------------------------------- +# Read tools must NOT be affected by the readonly mode +# --------------------------------------------------------------------------- + +async def test_list_projects_allowed_in_readonly(mock_client): + from src.tools.projects import list_projects + mock_client.get_projects.return_value = {"_embedded": {"elements": []}} + with patch("src.tools.projects.get_client", return_value=mock_client): + result = await list_projects() + assert "Read-only mode" not in result + + +async def test_list_work_packages_allowed_in_readonly(mock_client): + from src.tools.work_packages import list_work_packages + mock_client.get_work_packages.return_value = {"_embedded": {"elements": []}, "total": 0} + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await list_work_packages() + assert "Read-only mode" not in result + + +async def test_list_news_allowed_in_readonly(mock_client): + from src.tools.news import list_news + mock_client.get_news.return_value = {"_embedded": {"elements": []}, "total": 0} + with patch("src.tools.news.get_client", return_value=mock_client): + result = await list_news() + assert "Read-only mode" not in result + + +# --------------------------------------------------------------------------- +# Layer 3: server.py โ€” is_readonly() helper +# --------------------------------------------------------------------------- + +def test_is_readonly_returns_false_by_default(): + from src.server import is_readonly + # conftest sets 
READ_ONLY_MODE=false, so the loaded server has readonly=False + assert is_readonly() is False + + +def test_client_has_readonly_attribute(): + from src.server import get_client + client = get_client() + assert hasattr(client, "readonly") + assert client.readonly is False diff --git a/tests/test_relations.py b/tests/test_relations.py new file mode 100644 index 0000000..b4546b4 --- /dev/null +++ b/tests/test_relations.py @@ -0,0 +1,181 @@ +"""Tests for src/tools/relations.py.""" + +import pytest +from pydantic import ValidationError +from unittest.mock import patch + +from src.tools.relations import ( + CreateRelationInput, + UpdateRelationInput, + create_work_package_relation, + delete_work_package_relation, + get_work_package_relation, + list_work_package_relations, + update_work_package_relation, +) + +_RELATION = { + "id": 5, + "type": "follows", + "lag": 2, + "description": "Wait 2 days", + "_embedded": { + "from": {"id": 10, "subject": "Task A"}, + "to": {"id": 20, "subject": "Task B"}, + }, +} + + +# --------------------------------------------------------------------------- +# Input model validation +# --------------------------------------------------------------------------- + +def test_create_relation_input_valid(): + inp = CreateRelationInput(from_id=10, to_id=20, type="follows") + assert inp.from_id == 10 + assert inp.type == "follows" + + +def test_create_relation_input_invalid_zero_id(): + with pytest.raises(ValidationError): + CreateRelationInput(from_id=0, to_id=20, type="follows") + + +def test_update_relation_input_valid(): + inp = UpdateRelationInput(relation_id=5, lag=3, description="Updated") + assert inp.relation_id == 5 + assert inp.lag == 3 + + +def test_update_relation_input_invalid_id(): + with pytest.raises(ValidationError): + UpdateRelationInput(relation_id=0) + + +# --------------------------------------------------------------------------- +# create_work_package_relation +# 
--------------------------------------------------------------------------- + +async def test_create_relation_success(mock_client): + mock_client.create_work_package_relation.return_value = _RELATION + with patch("src.tools.relations.get_client", return_value=mock_client): + inp = CreateRelationInput(from_id=10, to_id=20, type="follows", lag=2) + result = await create_work_package_relation(inp) + + assert "โœ…" in result + assert "follows" in result + assert "Task A" in result + assert "Task B" in result + + +async def test_create_relation_failure(mock_client): + mock_client.create_work_package_relation.side_effect = Exception("Conflict") + with patch("src.tools.relations.get_client", return_value=mock_client): + inp = CreateRelationInput(from_id=10, to_id=20, type="blocks") + result = await create_work_package_relation(inp) + + assert "โŒ" in result + + +# --------------------------------------------------------------------------- +# list_work_package_relations +# --------------------------------------------------------------------------- + +async def test_list_relations_success(mock_client): + mock_client.list_work_package_relations.return_value = { + "_embedded": {"elements": [_RELATION]} + } + with patch("src.tools.relations.get_client", return_value=mock_client): + result = await list_work_package_relations(work_package_id=10) + + assert "follows" in result + assert "Task A" in result + + +async def test_list_relations_empty(mock_client): + mock_client.list_work_package_relations.return_value = { + "_embedded": {"elements": []} + } + with patch("src.tools.relations.get_client", return_value=mock_client): + result = await list_work_package_relations(work_package_id=10) + + assert "no relations" in result.lower() + + +# --------------------------------------------------------------------------- +# get_work_package_relation +# --------------------------------------------------------------------------- + +async def test_get_relation_success(mock_client): + 
+    mock_client.get_work_package_relation.return_value = _RELATION
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        result = await get_work_package_relation(relation_id=5)
+
+    assert "โœ…" in result
+    assert "follows" in result
+    assert "Task A" in result
+
+
+async def test_get_relation_failure(mock_client):
+    mock_client.get_work_package_relation.side_effect = Exception("Not found")
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        result = await get_work_package_relation(relation_id=99)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# update_work_package_relation
+# ---------------------------------------------------------------------------
+
+async def test_update_relation_success(mock_client):
+    mock_client.update_work_package_relation.return_value = {
+        "id": 5,
+        "type": "follows",
+        "lag": 3,
+        "description": "Updated",
+    }
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        inp = UpdateRelationInput(relation_id=5, lag=3, description="Updated")
+        result = await update_work_package_relation(inp)
+
+    assert "โœ…" in result
+    assert "5" in result
+
+
+async def test_update_relation_no_fields(mock_client):
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        inp = UpdateRelationInput(relation_id=5)
+        result = await update_work_package_relation(inp)
+
+    assert "โŒ" in result
+    mock_client.update_work_package_relation.assert_not_called()
+
+
+# ---------------------------------------------------------------------------
+# delete_work_package_relation
+# ---------------------------------------------------------------------------
+
+async def test_delete_relation_success(mock_client):
+    mock_client.delete_work_package_relation.return_value = True
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        result = await delete_work_package_relation(relation_id=5)
+
+    assert "โœ…" in result
+    assert "5" in result
+
+
+async def test_delete_relation_api_returns_false(mock_client):
+    mock_client.delete_work_package_relation.return_value = False
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        result = await delete_work_package_relation(relation_id=5)
+
+    assert "โŒ" in result
+
+
+async def test_delete_relation_failure(mock_client):
+    mock_client.delete_work_package_relation.side_effect = Exception("Not found")
+    with patch("src.tools.relations.get_client", return_value=mock_client):
+        result = await delete_work_package_relation(relation_id=99)
+
+    assert "โŒ" in result
diff --git a/tests/test_time_entries.py b/tests/test_time_entries.py
new file mode 100644
index 0000000..540261b
--- /dev/null
+++ b/tests/test_time_entries.py
@@ -0,0 +1,219 @@
+"""Tests for src/tools/time_entries.py."""
+
+import pytest
+from pydantic import ValidationError
+from unittest.mock import patch
+
+from src.tools.time_entries import (
+    CreateTimeEntryInput,
+    UpdateTimeEntryInput,
+    create_time_entry,
+    delete_time_entry,
+    list_time_entries,
+    list_time_entry_activities,
+    update_time_entry,
+)
+
+_ENTRY = {
+    "id": 7,
+    "hours": 2.5,
+    "spentOn": "2025-03-01",
+    "comment": {"raw": "Worked on login"},
+    "_embedded": {
+        "workPackage": {"subject": "Fix login bug"},
+        "user": {"name": "Alice"},
+        "activity": {"name": "Development"},
+    },
+}
+
+
+# ---------------------------------------------------------------------------
+# Input model validation
+# ---------------------------------------------------------------------------
+
+def test_create_time_entry_input_valid():
+    inp = CreateTimeEntryInput(
+        work_package_id=42, hours=3.0, spent_on="2025-03-01", activity_id=3
+    )
+    assert inp.hours == 3.0
+
+
+def test_create_time_entry_input_zero_hours():
+    with pytest.raises(ValidationError):
+        CreateTimeEntryInput(
+            work_package_id=42, hours=0, spent_on="2025-03-01", activity_id=3
+        )
+
+
+def test_create_time_entry_input_negative_hours():
+    with pytest.raises(ValidationError):
+        CreateTimeEntryInput(
+            work_package_id=42, hours=-1.5, spent_on="2025-03-01", activity_id=3
+        )
+
+
+def test_update_time_entry_input_valid():
+    inp = UpdateTimeEntryInput(time_entry_id=7, hours=4.0, spent_on="2025-03-02")
+    assert inp.time_entry_id == 7
+    assert inp.hours == 4.0
+
+
+def test_update_time_entry_input_invalid_id():
+    with pytest.raises(ValidationError):
+        UpdateTimeEntryInput(time_entry_id=0, hours=1.0)
+
+
+# ---------------------------------------------------------------------------
+# list_time_entries
+# ---------------------------------------------------------------------------
+
+async def test_list_time_entries_success(mock_client):
+    mock_client.get_time_entries.return_value = {"_embedded": {"elements": [_ENTRY]}}
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entries()
+
+    assert "Fix login bug" in result
+    assert "2.5" in result
+
+
+async def test_list_time_entries_with_filters(mock_client):
+    mock_client.get_time_entries.return_value = {"_embedded": {"elements": [_ENTRY]}}
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entries(
+            work_package_id=42, user_id=5,
+            from_date="2025-03-01", to_date="2025-03-31"
+        )
+
+    assert "Fix login bug" in result
+
+
+async def test_list_time_entries_empty(mock_client):
+    mock_client.get_time_entries.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entries()
+
+    assert "No time entries found" in result
+
+
+async def test_list_time_entries_failure(mock_client):
+    mock_client.get_time_entries.side_effect = Exception("Server error")
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entries()
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# create_time_entry
+# ---------------------------------------------------------------------------
+
+async def test_create_time_entry_success(mock_client):
+    mock_client.create_time_entry.return_value = _ENTRY
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        inp = CreateTimeEntryInput(
+            work_package_id=42, hours=2.5, spent_on="2025-03-01",
+            activity_id=3, comment="Worked on login"
+        )
+        result = await create_time_entry(inp)
+
+    assert "โœ…" in result
+    assert "2.5" in result
+
+
+async def test_create_time_entry_failure(mock_client):
+    mock_client.create_time_entry.side_effect = Exception("Invalid activity")
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        inp = CreateTimeEntryInput(
+            work_package_id=42, hours=1.0, spent_on="2025-03-01", activity_id=99
+        )
+        result = await create_time_entry(inp)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# update_time_entry
+# ---------------------------------------------------------------------------
+
+async def test_update_time_entry_success(mock_client):
+    mock_client.update_time_entry.return_value = {
+        "id": 7, "hours": 4.0, "spentOn": "2025-03-02",
+        "_embedded": {"activity": {"name": "Testing"}},
+    }
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        inp = UpdateTimeEntryInput(time_entry_id=7, hours=4.0)
+        result = await update_time_entry(inp)
+
+    assert "โœ…" in result
+    assert "4.0" in result
+
+
+async def test_update_time_entry_no_fields(mock_client):
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        inp = UpdateTimeEntryInput(time_entry_id=7)
+        result = await update_time_entry(inp)
+
+    assert "โŒ" in result
+    mock_client.update_time_entry.assert_not_called()
+
+
+# ---------------------------------------------------------------------------
+# delete_time_entry
+# ---------------------------------------------------------------------------
+
+async def test_delete_time_entry_success(mock_client):
+    mock_client.delete_time_entry.return_value = True
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await delete_time_entry(time_entry_id=7)
+
+    assert "โœ…" in result
+    assert "7" in result
+
+
+async def test_delete_time_entry_failure(mock_client):
+    mock_client.delete_time_entry.side_effect = Exception("Not found")
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await delete_time_entry(time_entry_id=99)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# list_time_entry_activities
+# ---------------------------------------------------------------------------
+
+async def test_list_time_entry_activities_success(mock_client):
+    mock_client.get_time_entry_activities.return_value = {
+        "_embedded": {
+            "elements": [
+                {"id": 3, "name": "Development", "isDefault": True},
+                {"id": 4, "name": "Testing"},
+            ]
+        }
+    }
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entry_activities()
+
+    assert "Development" in result
+    assert "Testing" in result
+
+
+async def test_list_time_entry_activities_empty_fallback(mock_client):
+    """When the API returns no activities the tool should show common IDs."""
+    mock_client.get_time_entry_activities.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entry_activities()
+
+    # Falls back to hardcoded common activities
+    assert "Management" in result
+    assert "Development" in result
+
+
+async def test_list_time_entry_activities_api_error_fallback(mock_client):
+    """On API error the tool should still return common activities."""
+    mock_client.get_time_entry_activities.side_effect = Exception("404 Not Found")
+    with patch("src.tools.time_entries.get_client", return_value=mock_client):
+        result = await list_time_entry_activities()
+
+    assert "Management" in result
+    assert "Development" in result
diff --git a/tests/test_unit.py b/tests/test_unit.py
new file mode 100644
index 0000000..1b00a5e
--- /dev/null
+++ b/tests/test_unit.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+"""
+Unit Tests for OpenProject MCP Server
+
+These tests focus on individual components and don't require a running OpenProject instance.
+Adapted from PR #4 (addictivedev/openproject-mcp-server) to work with the modular src/ structure.
+"""
+
+import sys
+import os
+import pytest
+from unittest.mock import Mock, AsyncMock, patch
+
+# Add project root to path
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from src.client import OpenProjectClient
+
+
+class TestOpenProjectClient:
+    """Test cases for OpenProjectClient"""
+
+    def test_init(self):
+        """Test client initialization"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+        assert client.base_url == "https://test.openproject.com"
+        assert client.api_key == "test-key"
+        assert client.proxy is None
+
+    def test_init_with_proxy(self):
+        """Test client initialization with proxy"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key", "http://proxy:8080")
+        assert client.proxy == "http://proxy:8080"
+
+    def test_encode_api_key(self):
+        """Test API key encoding"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+        encoded = client._encode_api_key()
+        assert isinstance(encoded, str)
+        assert len(encoded) > 0
+
+    def test_format_error_message(self):
+        """Test error message formatting"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        # Test 401 error
+        error_msg = client._format_error_message(401, "Unauthorized")
+        assert "Authentication failed" in error_msg
+
+        # Test 403 error
+        error_msg = client._format_error_message(403, "Forbidden")
+        assert "Access denied" in error_msg
+
+        # Test 404 error
+        error_msg = client._format_error_message(404, "Not Found")
+        assert "Resource not found" in error_msg
+
+        # Test unknown error
+        error_msg = client._format_error_message(999, "Unknown Error")
+        assert "Unknown Error" in error_msg
+
+    def test_trailing_slash_stripped(self):
+        """Test that trailing slash is stripped from base_url"""
+        client = OpenProjectClient("https://test.openproject.com/", "test-key")
+        assert client.base_url == "https://test.openproject.com"
+
+    def test_headers_set(self):
+        """Test that required headers are set"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+        assert "Authorization" in client.headers
+        assert "Content-Type" in client.headers
+        assert "Accept" in client.headers
+        assert client.headers["Content-Type"] == "application/json"
+        assert client.headers["Accept"] == "application/json"
+
+
+class TestRelationClientMethods:
+    """Test cases for work package relation client methods"""
+
+    @pytest.mark.asyncio
+    async def test_create_work_package_relation(self):
+        """Test create_work_package_relation method"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        mock_result = {
+            "id": 42,
+            "type": "follows",
+            "_embedded": {
+                "from": {"id": 1, "subject": "Task A"},
+                "to": {"id": 2, "subject": "Task B"},
+            },
+        }
+
+        with patch.object(client, "_request", new_callable=AsyncMock) as mock_request:
+            mock_request.return_value = mock_result
+
+            data = {"from_id": 1, "to_id": 2, "type": "follows"}
+            result = await client.create_work_package_relation(data)
+
+            assert result["id"] == 42
+            assert result["type"] == "follows"
+            mock_request.assert_called_once()
+            call_args = mock_request.call_args
+            assert call_args[0][0] == "POST"
+            assert "/work_packages/1/relations" in call_args[0][1]
+
+    @pytest.mark.asyncio
+    async def test_list_work_package_relations(self):
+        """Test list_work_package_relations method"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        mock_result = {
+            "_embedded": {
+                "elements": [
+                    {"id": 1, "type": "follows"},
+                    {"id": 2, "type": "blocks"},
+                ]
+            }
+        }
+
+        with patch.object(client, "_request", new_callable=AsyncMock) as mock_request:
+            mock_request.return_value = mock_result
+
+            result = await client.list_work_package_relations()
+
+            assert len(result["_embedded"]["elements"]) == 2
+            mock_request.assert_called_once()
+            call_args = mock_request.call_args
+            assert call_args[0][0] == "GET"
+            assert "/relations" in call_args[0][1]
+
+    @pytest.mark.asyncio
+    async def test_delete_work_package_relation(self):
+        """Test delete_work_package_relation method"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        with patch.object(client, "_request", new_callable=AsyncMock) as mock_request:
+            mock_request.return_value = {}
+
+            result = await client.delete_work_package_relation(42)
+
+            assert result is True
+            mock_request.assert_called_once()
+            call_args = mock_request.call_args
+            assert call_args[0][0] == "DELETE"
+            assert "/relations/42" in call_args[0][1]
+
+    @pytest.mark.asyncio
+    async def test_get_work_package_relation(self):
+        """Test get_work_package_relation method"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        mock_result = {"id": 42, "type": "blocks"}
+
+        with patch.object(client, "_request", new_callable=AsyncMock) as mock_request:
+            mock_request.return_value = mock_result
+
+            result = await client.get_work_package_relation(42)
+
+            assert result["id"] == 42
+            assert result["type"] == "blocks"
+            mock_request.assert_called_once_with("GET", "/relations/42")
+
+    @pytest.mark.asyncio
+    async def test_create_relation_missing_from_id(self):
+        """Test that create_work_package_relation raises error without from_id"""
+        client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+        with pytest.raises(ValueError, match="from_id is required"):
+            await client.create_work_package_relation({"to_id": 2, "type": "follows"})
+
+
+@pytest.mark.asyncio
+async def test_async_operations():
+    """Test that async operations work correctly"""
+    client = OpenProjectClient("https://test.openproject.com", "test-key")
+
+    with patch.object(client, "_request", new_callable=AsyncMock) as mock_request:
+        mock_request.return_value = {"_type": "Root", "instanceVersion": "13.0.0"}
+
+        result = await client.test_connection()
+
+        assert result["_type"] == "Root"
+        assert result["instanceVersion"] == "13.0.0"
+        mock_request.assert_called_once_with("GET", "")
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
diff --git a/tests/test_users.py b/tests/test_users.py
new file mode 100644
index 0000000..f9ab940
--- /dev/null
+++ b/tests/test_users.py
@@ -0,0 +1,197 @@
+"""Tests for src/tools/users.py."""
+
+from unittest.mock import patch
+
+from src.tools.users import (
+    get_role,
+    get_user,
+    list_project_members,
+    list_roles,
+    list_user_projects,
+    list_users,
+)
+
+_USER = {
+    "id": 7,
+    "name": "Bob Smith",
+    "email": "bob@example.com",
+    "login": "bob",
+    "status": "active",
+    "admin": False,
+}
+
+_MEMBERSHIP = {
+    "_links": {
+        "principal": {"title": "Bob Smith", "href": "/api/v3/users/7"},
+        "project": {"title": "Alpha"},
+        "roles": [{"title": "Developer"}],
+    }
+}
+
+
+# ---------------------------------------------------------------------------
+# list_users
+# ---------------------------------------------------------------------------
+
+async def test_list_users_success(mock_client):
+    mock_client.get_users.return_value = {"_embedded": {"elements": [_USER]}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_users()
+
+    assert "Bob Smith" in result
+    assert "bob@example.com" in result
+
+
+async def test_list_users_with_name_filter(mock_client):
+    mock_client.get_users.return_value = {"_embedded": {"elements": [_USER]}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_users(name="Bob")
+
+    assert "Bob Smith" in result
+    # Confirm a filter JSON was passed to get_users
+    call_args = mock_client.get_users.call_args
+    assert call_args is not None
+    filters_arg = call_args[0][0] if call_args[0] else call_args[1].get("filters_json")
+    assert filters_arg is not None
+
+
+async def test_list_users_empty(mock_client):
+    mock_client.get_users.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_users()
+
+    assert "No users found" in result
+
+
+async def test_list_users_failure(mock_client):
+    mock_client.get_users.side_effect = Exception("API error")
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_users()
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# get_user
+# ---------------------------------------------------------------------------
+
+async def test_get_user_success(mock_client):
+    mock_client.get_user.return_value = _USER
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await get_user(user_id=7)
+
+    assert "Bob Smith" in result
+    assert "โœ…" in result
+
+
+async def test_get_user_failure(mock_client):
+    mock_client.get_user.side_effect = Exception("Not found")
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await get_user(user_id=99)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# list_roles
+# ---------------------------------------------------------------------------
+
+async def test_list_roles_success(mock_client):
+    mock_client.get_roles.return_value = {
+        "_embedded": {"elements": [{"id": 1, "name": "Developer"}, {"id": 2, "name": "Manager"}]}
+    }
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_roles()
+
+    assert "Developer" in result
+    assert "Manager" in result
+
+
+async def test_list_roles_empty(mock_client):
+    mock_client.get_roles.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_roles()
+
+    assert "No roles found" in result
+
+
+# ---------------------------------------------------------------------------
+# get_role
+# ---------------------------------------------------------------------------
+
+async def test_get_role_success(mock_client):
+    mock_client.get_role.return_value = {
+        "id": 1,
+        "name": "Developer",
+        "_embedded": {
+            "permissions": [{"name": "view_work_packages"}, {"name": "edit_work_packages"}]
+        },
+    }
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await get_role(role_id=1)
+
+    assert "Developer" in result
+    assert "view_work_packages" in result
+
+
+async def test_get_role_failure(mock_client):
+    mock_client.get_role.side_effect = Exception("Not found")
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await get_role(role_id=99)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# list_project_members
+# ---------------------------------------------------------------------------
+
+async def test_list_project_members_success(mock_client):
+    mock_client.get_memberships.return_value = {
+        "_embedded": {"elements": [_MEMBERSHIP]}
+    }
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_project_members(project_id=1)
+
+    assert "Bob Smith" in result
+    assert "Developer" in result
+
+
+async def test_list_project_members_empty(mock_client):
+    mock_client.get_memberships.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_project_members(project_id=1)
+
+    assert "No members found" in result
+
+
+# ---------------------------------------------------------------------------
+# list_user_projects
+# ---------------------------------------------------------------------------
+
+async def test_list_user_projects_success(mock_client):
+    mock_client.get_memberships.return_value = {
+        "_embedded": {
+            "elements": [
+                {
+                    "_embedded": {
+                        "project": {"name": "Alpha"},
+                        "roles": [{"name": "Developer"}],
+                    }
+                }
+            ]
+        }
+    }
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_user_projects(user_id=7)
+
+    assert "Alpha" in result
+    assert "Developer" in result
+
+
+async def test_list_user_projects_not_member(mock_client):
+    mock_client.get_memberships.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.users.get_client", return_value=mock_client):
+        result = await list_user_projects(user_id=99)
+
+    assert "not a member" in result
diff --git a/tests/test_versions.py b/tests/test_versions.py
new file mode 100644
index 0000000..c701c28
--- /dev/null
+++ b/tests/test_versions.py
@@ -0,0 +1,117 @@
+"""Tests for src/tools/versions.py."""
+
+import pytest
+from pydantic import ValidationError
+from unittest.mock import patch
+
+from src.tools.versions import CreateVersionInput, create_version, list_versions
+
+_VERSION = {
+    "id": 3,
+    "name": "v1.0",
+    "status": "open",
+    "startDate": "2025-01-01",
+    "endDate": "2025-03-31",
+    "description": {"raw": "First release"},
+    "_embedded": {"definingProject": {"name": "Alpha"}},
+}
+
+
+# ---------------------------------------------------------------------------
+# Input model validation
+# ---------------------------------------------------------------------------
+
+def test_create_version_input_valid():
+    inp = CreateVersionInput(
+        project_id=1, name="v1.0", due_date="2025-03-31", status="open"
+    )
+    assert inp.name == "v1.0"
+    assert inp.project_id == 1
+
+
+def test_create_version_input_empty_name():
+    with pytest.raises(ValidationError):
+        CreateVersionInput(project_id=1, name="")
+
+
+def test_create_version_input_invalid_project_id():
+    with pytest.raises(ValidationError):
+        CreateVersionInput(project_id=0, name="v1.0")
+
+
+def test_create_version_input_name_too_long():
+    with pytest.raises(ValidationError):
+        CreateVersionInput(project_id=1, name="x" * 256)
+
+
+# ---------------------------------------------------------------------------
+# list_versions
+# ---------------------------------------------------------------------------
+
+async def test_list_versions_success(mock_client):
+    mock_client.get_versions.return_value = {"_embedded": {"elements": [_VERSION]}}
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        result = await list_versions(project_id=1)
+
+    assert "v1.0" in result
+    assert "open" in result
+    assert "Alpha" in result
+
+
+async def test_list_versions_empty(mock_client):
+    mock_client.get_versions.return_value = {"_embedded": {"elements": []}}
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        result = await list_versions(project_id=1)
+
+    assert "No versions found" in result
+
+
+async def test_list_versions_failure(mock_client):
+    mock_client.get_versions.side_effect = Exception("Not found")
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        result = await list_versions(project_id=99)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# create_version
+# ---------------------------------------------------------------------------
+
+async def test_create_version_success(mock_client):
+    mock_client.create_version.return_value = _VERSION
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        inp = CreateVersionInput(
+            project_id=1, name="v1.0",
+            description="First release", due_date="2025-03-31", status="open"
+        )
+        result = await create_version(inp)
+
+    assert "โœ…" in result
+    assert "v1.0" in result
+    assert "open" in result
+
+
+async def test_create_version_with_dates(mock_client):
+    mock_client.create_version.return_value = {
+        "id": 4, "name": "v2.0", "status": "open",
+        "startDate": "2025-04-01", "endDate": "2025-06-30",
+    }
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        inp = CreateVersionInput(
+            project_id=1, name="v2.0",
+            start_date="2025-04-01", due_date="2025-06-30"
+        )
+        result = await create_version(inp)
+
+    assert "2025-04-01" in result
+    assert "2025-06-30" in result
+
+
+async def test_create_version_failure(mock_client):
+    mock_client.create_version.side_effect = Exception("Name already taken")
+    with patch("src.tools.versions.get_client", return_value=mock_client):
+        inp = CreateVersionInput(project_id=1, name="v1.0")
+        result = await create_version(inp)
+
+    assert "โŒ" in result
diff --git a/tests/test_weekly_reports.py b/tests/test_weekly_reports.py
new file mode 100644
index 0000000..718fd69
--- /dev/null
+++ b/tests/test_weekly_reports.py
@@ -0,0 +1,194 @@
+"""Tests for src/tools/weekly_reports.py."""
+
+import json
+import pytest
+from pydantic import ValidationError
+from unittest.mock import patch
+
+from src.tools.weekly_reports import (
+    GenerateWeeklyReportInput,
+    GetReportDataInput,
+    generate_last_week_report,
+    generate_this_week_report,
+    generate_weekly_report,
+    get_report_data,
+)
+
+_PROJECT = {"id": 1, "name": "Alpha", "identifier": "alpha"}
+
+_WP_DONE = {
+    "id": 10,
+    "subject": "Implement login",
+    "updatedAt": "2025-03-05T12:00:00Z",
+    "createdAt": "2025-03-01T08:00:00Z",
+    "_embedded": {"status": {"name": "Done"}, "type": {"name": "Task"}},
+}
+
+_WP_IN_PROGRESS = {
+    "id": 11,
+    "subject": "Add dashboard",
+    "updatedAt": "2025-03-06T14:00:00Z",
+    "createdAt": "2025-03-02T09:00:00Z",
+    "_embedded": {"status": {"name": "In Progress"}, "type": {"name": "Feature"}},
+}
+
+
+def _make_wp_page(elements, total=None):
+    return {
+        "_embedded": {"elements": elements},
+        "total": total if total is not None else len(elements),
+    }
+
+
+def _setup_report_mocks(mock_client):
+    """Configure mock_client for a typical generate_weekly_report call."""
+    mock_client.get_project.return_value = _PROJECT
+    # _fetch_all_project_work_packages loops; return one page then empty
+    mock_client.get_work_packages.side_effect = [
+        _make_wp_page([_WP_DONE, _WP_IN_PROGRESS], total=2),
+        _make_wp_page([]),  # terminates loop
+    ]
+    mock_client.get_memberships.return_value = {
+        "_embedded": {"elements": []}
+    }
+    mock_client.get_time_entries.return_value = {
+        "_embedded": {"elements": []}
+    }
+    # Suppress unawaited-coroutine warnings from the optional relations fetch
+    mock_client.get_relations.return_value = {"_embedded": {"elements": []}}
+
+
+# ---------------------------------------------------------------------------
+# Input model validation
+# ---------------------------------------------------------------------------
+
+def test_generate_weekly_report_input_valid():
+    inp = GenerateWeeklyReportInput(
+        project_id=1, from_date="2025-03-03", to_date="2025-03-09"
+    )
+    assert inp.format == "markdown"
+
+
+def test_generate_weekly_report_input_invalid_project():
+    with pytest.raises(ValidationError):
+        GenerateWeeklyReportInput(project_id=0, from_date="2025-03-03", to_date="2025-03-09")
+
+
+def test_get_report_data_input_valid():
+    inp = GetReportDataInput(project_id=1, from_date="2025-03-03", to_date="2025-03-09")
+    assert inp.project_id == 1
+
+
+def test_get_report_data_input_invalid_project():
+    with pytest.raises(ValidationError):
+        GetReportDataInput(project_id=0, from_date="2025-03-03", to_date="2025-03-09")
+
+
+# ---------------------------------------------------------------------------
+# generate_weekly_report (markdown)
+# ---------------------------------------------------------------------------
+
+async def test_generate_weekly_report_markdown(mock_client):
+    _setup_report_mocks(mock_client)
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GenerateWeeklyReportInput(
+            project_id=1, from_date="2025-03-03", to_date="2025-03-09",
+            team_name="Backend Team", format="markdown"
+        )
+        result = await generate_weekly_report(inp)
+
+    assert "Alpha" in result
+    assert isinstance(result, str)
+    assert len(result) > 50
+
+
+async def test_generate_weekly_report_json_format(mock_client):
+    _setup_report_mocks(mock_client)
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GenerateWeeklyReportInput(
+            project_id=1, from_date="2025-03-03", to_date="2025-03-09",
+            format="json"
+        )
+        result = await generate_weekly_report(inp)
+
+    data = json.loads(result)
+    assert isinstance(data, dict)
+
+
+async def test_generate_weekly_report_invalid_date_format(mock_client):
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GenerateWeeklyReportInput(
+            project_id=1, from_date="03-03-2025", to_date="03-09-2025"
+        )
+        result = await generate_weekly_report(inp)
+
+    assert "โŒ" in result or "Invalid date" in result
+
+
+async def test_generate_weekly_report_date_range_inverted(mock_client):
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GenerateWeeklyReportInput(
+            project_id=1, from_date="2025-03-09", to_date="2025-03-03"
+        )
+        result = await generate_weekly_report(inp)
+
+    assert "โŒ" in result or "before" in result
+
+
+async def test_generate_weekly_report_api_failure(mock_client):
+    mock_client.get_project.side_effect = Exception("Not found")
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GenerateWeeklyReportInput(
+            project_id=99, from_date="2025-03-03", to_date="2025-03-09"
+        )
+        result = await generate_weekly_report(inp)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# get_report_data
+# ---------------------------------------------------------------------------
+
+async def test_get_report_data_returns_json(mock_client):
+    _setup_report_mocks(mock_client)
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GetReportDataInput(
+            project_id=1, from_date="2025-03-03", to_date="2025-03-09"
+        )
+        result = await get_report_data(inp)
+
+    data = json.loads(result)
+    assert isinstance(data, dict)
+
+
+async def test_get_report_data_invalid_dates(mock_client):
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        inp = GetReportDataInput(
+            project_id=1, from_date="bad-date", to_date="2025-03-09"
+        )
+        result = await get_report_data(inp)
+
+    assert "โŒ" in result or "Invalid date" in result
+
+
+# ---------------------------------------------------------------------------
+# generate_this_week_report / generate_last_week_report
+# ---------------------------------------------------------------------------
+
+async def test_generate_this_week_report(mock_client):
+    _setup_report_mocks(mock_client)
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        result = await generate_this_week_report(project_id=1, team_name="Team X")
+
+    assert "Alpha" in result
+    assert isinstance(result, str)
+
+
+async def test_generate_last_week_report(mock_client):
+    _setup_report_mocks(mock_client)
+    with patch("src.tools.weekly_reports.get_client", return_value=mock_client):
+        result = await generate_last_week_report(project_id=1)
+
+    assert "Alpha" in result
+    assert isinstance(result, str)
diff --git a/tests/test_work_packages.py b/tests/test_work_packages.py
new file mode 100644
index 0000000..61ee904
--- /dev/null
+++ b/tests/test_work_packages.py
@@ -0,0 +1,249 @@
+"""Tests for src/tools/work_packages.py."""
+
+import pytest
+from pydantic import ValidationError
+from unittest.mock import patch
+
+from src.tools.work_packages import (
+    CreateWorkPackageInput,
+    UpdateWorkPackageInput,
+    assign_work_package,
+    create_work_package,
+    delete_work_package,
+    list_priorities,
+    list_statuses,
+    list_types,
+    list_work_packages,
+    search_work_packages,
+    unassign_work_package,
+    update_work_package,
+)
+
+_WP = {
+    "id": 42,
+    "subject": "Fix login bug",
+    "_embedded": {
+        "type": {"name": "Bug"},
+        "status": {"name": "In Progress"},
+        "priority": {"name": "High"},
+        "assignee": {"name": "Alice"},
+    },
+}
+
+
+# ---------------------------------------------------------------------------
+# Input model validation
+# ---------------------------------------------------------------------------
+
+def test_create_wp_input_valid():
+    inp = CreateWorkPackageInput(project_id=1, subject="Do something", type_id=2)
+    assert inp.project_id == 1
+    assert inp.subject == "Do something"
+
+
+def test_create_wp_input_missing_project():
+    with pytest.raises(ValidationError):
+        CreateWorkPackageInput(subject="No project", type_id=1)
+
+
+def test_create_wp_input_invalid_project_id():
+    with pytest.raises(ValidationError):
+        CreateWorkPackageInput(project_id=0, subject="Bad", type_id=1)
+
+
+def test_update_wp_input_valid():
+    inp = UpdateWorkPackageInput(work_package_id=10, status_id=3, percentage_done=50)
+    assert inp.work_package_id == 10
+    assert inp.percentage_done == 50
+
+
+def test_update_wp_input_percentage_out_of_range():
+    with pytest.raises(ValidationError):
+        UpdateWorkPackageInput(work_package_id=1, percentage_done=110)
+
+
+# ---------------------------------------------------------------------------
+# list_work_packages
+# ---------------------------------------------------------------------------
+
+async def test_list_work_packages_success(mock_client):
+    mock_client.get_work_packages.return_value = {
+        "_embedded": {"elements": [_WP]},
+        "total": 1,
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await list_work_packages(page_size=10)
+
+    assert "Fix login bug" in result
+
+
+async def test_list_work_packages_empty(mock_client):
+    mock_client.get_work_packages.return_value = {
+        "_embedded": {"elements": []},
+        "total": 0,
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await list_work_packages()
+
+    assert "No work packages" in result or result.strip() != ""
+
+
+async def test_list_work_packages_failure(mock_client):
+    mock_client.get_work_packages.side_effect = Exception("Server error")
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await list_work_packages()
+
+    assert "โŒ" in result
+
+
+async def test_list_work_packages_with_project_filter(mock_client):
+    mock_client.get_work_packages.return_value = {
+        "_embedded": {"elements": [_WP]},
+        "total": 1,
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await list_work_packages(project_id=5)
+
+    mock_client.get_work_packages.assert_called_once()
+    assert "Fix login bug" in result
+
+
+# ---------------------------------------------------------------------------
+# search_work_packages
+# ---------------------------------------------------------------------------
+
+async def test_search_work_packages_success(mock_client):
+    mock_client.get_work_packages.return_value = {
+        "_embedded": {"elements": [_WP]},
+        "total": 1,
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await search_work_packages(query="login")
+
+    assert "Fix login bug" in result
+
+
+# ---------------------------------------------------------------------------
+# create_work_package
+# ---------------------------------------------------------------------------
+
+async def test_create_work_package_success(mock_client):
+    mock_client.create_work_package.return_value = {
+        "id": 99,
+        "subject": "New task",
+        "_embedded": {
+            "type": {"name": "Task"},
+            "status": {"name": "New"},
+            "project": {"name": "Alpha"},
+        },
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        inp = CreateWorkPackageInput(project_id=1, subject="New task", type_id=1)
+        result = await create_work_package(inp)
+
+    assert "โœ…" in result
+    assert "New task" in result
+
+
+async def test_create_work_package_failure(mock_client):
+    mock_client.create_work_package.side_effect = Exception("Bad request")
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        inp = CreateWorkPackageInput(project_id=1, subject="Bad", type_id=1)
+        result = await create_work_package(inp)
+
+    assert "โŒ" in result
+
+
+# ---------------------------------------------------------------------------
+# update_work_package
+# ---------------------------------------------------------------------------
+
+async def test_update_work_package_success(mock_client):
+    mock_client.update_work_package.return_value = {
+        "id": 42,
+        "subject": "Fix login bug",
+        "_embedded": {"status": {"name": "Done"}, "type": {"name": "Bug"}},
+    }
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        inp = UpdateWorkPackageInput(work_package_id=42, status_id=5)
+        result = await update_work_package(inp)
+
+    assert "โœ…" in result
+
+
+# ---------------------------------------------------------------------------
+# delete_work_package
+# ---------------------------------------------------------------------------
+
+async def test_delete_work_package_success(mock_client):
+    mock_client.delete_work_package.return_value = True
+    with patch("src.tools.work_packages.get_client", return_value=mock_client):
+        result = await delete_work_package(work_package_id=42)
+
+    assert "โœ…" in result
+    assert "42" in result
+
+
+# ---------------------------------------------------------------------------
+# list_types / list_statuses / list_priorities
+# ---------------------------------------------------------------------------
+
+async def test_list_types_success(mock_client):
+    mock_client.get_types.return_value = {
+        "_embedded": {"elements": [{"id": 1, "name": "Task"}, {"id": 2, "name": "Bug"}]}
+    }
+    with
patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await list_types() + + assert "Task" in result + assert "Bug" in result + + +async def test_list_statuses_success(mock_client): + mock_client.get_statuses.return_value = { + "_embedded": {"elements": [{"id": 1, "name": "New"}, {"id": 2, "name": "In Progress"}]} + } + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await list_statuses() + + assert "New" in result + assert "In Progress" in result + + +async def test_list_priorities_success(mock_client): + mock_client.get_priorities.return_value = { + "_embedded": {"elements": [{"id": 1, "name": "Low"}, {"id": 2, "name": "High"}]} + } + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await list_priorities() + + assert "Low" in result + assert "High" in result + + +# --------------------------------------------------------------------------- +# assign / unassign +# --------------------------------------------------------------------------- + +async def test_assign_work_package_success(mock_client): + mock_client.update_work_package.return_value = { + "id": 42, + "subject": "Fix login bug", + "_embedded": {"assignee": {"name": "Alice"}}, + } + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await assign_work_package(work_package_id=42, assignee_id=7) + + assert "โœ…" in result + + +async def test_unassign_work_package_success(mock_client): + mock_client.update_work_package.return_value = { + "id": 42, + "subject": "Fix login bug", + "_embedded": {}, + } + with patch("src.tools.work_packages.get_client", return_value=mock_client): + result = await unassign_work_package(work_package_id=42) + + assert "โœ…" in result