# PR #1296 — Pin v2 visibility metadata contract to typed-tables-only
# Workflow file for this run:

# CI pipeline: static quality gates, sharded feature suites against MySQL
# and PostgreSQL, a unit-test coverage run, and a single "build" gate job
# that branch protection can require.
name: build

on:
  push:
    branches: [master, v2]
  pull_request:
    branches: [master, v2]
jobs:
  # Fast static checks: composer manifest validation, coding style, PHPStan.
  quality:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 10
      - name: Validate composer.json and composer.lock
        run: composer validate --strict
      - name: Cache Composer packages
        uses: actions/cache@v5
        with:
          path: vendor
          key: ${{ runner.os }}-php-${{ hashFiles('**/composer.lock') }}
          restore-keys: |
            ${{ runner.os }}-php-
      - name: Install dependencies
        run: composer install --prefer-dist --no-progress
      - name: Check coding style via ECS
        run: vendor/bin/ecs check
      - name: Run static analysis via PHPStan
        run: vendor/bin/phpstan analyse src tests
feature-mysql:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
shard: [0, 1, 2, 3]
services:
mysql:
image: mysql
env:
MYSQL_ROOT_PASSWORD: password
# The feature suite runs migrate:fresh for every test case. MySQL's
# default overlay-backed datadir makes that repeated DDL/fsync path the
# CI long pole; keep test data on tmpfs because this database is
# disposable for each job.
options: >-
--tmpfs /var/lib/mysql:rw,noexec,nosuid,size=1024m
ports:
- 3306:3306
redis:
image: redis
ports:
- 6379:6379
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 10
- name: Cache Composer packages
uses: actions/cache@v5
with:
path: vendor
key: ${{ runner.os }}-php-${{ hashFiles('**/composer.lock') }}
restore-keys: |
${{ runner.os }}-php-
- name: Install dependencies
run: composer install --prefer-dist --no-progress
- name: Export GitHub Actions test defaults
run: |
{
echo "APP_KEY=base64:i3g6f+dV8FfsIkcxqd7gbiPn2oXk5r00sTmdD6V5utI="
echo "DB_HOST=127.0.0.1"
echo "DB_DATABASE=testbench"
echo "DB_USERNAME=root"
echo "DB_PASSWORD=password"
echo "QUEUE_FAILED_DRIVER=null"
echo "REDIS_HOST=127.0.0.1"
echo "REDIS_PASSWORD="
echo "REDIS_PORT=${{ job.services.redis.ports['6379'] }}"
# Testbench defaults cache to array (per-process, in-memory). The V2
# TaskWatchdog throttle set by Tests\TestCase::setUp needs to be
# visible to the two queue:work worker processes so their wake()
# poll short-circuits — array cache means the workers never see the
# test's throttle and runPass races the test's DB state (originally
# diagnosed as the #427 PG "hang" and the MySQL Mode-B flakes).
# The local docker stack uses tests/.env.feature which already sets
# CACHE_DRIVER=redis for the same reason; mirror it here.
echo "CACHE_DRIVER=redis"
echo "CACHE_STORE=redis"
} >> "$GITHUB_ENV"
- name: Create MySQL database
run: mysql -e 'CREATE DATABASE testbench' -h127.0.0.1 -uroot -ppassword -P ${{ job.services.mysql.ports['3306'] }}
- name: Run feature test suite (MySQL shard ${{ matrix.shard }})
id: mysql_suite
timeout-minutes: 65
run: |
mkdir -p build/test-results
php scripts/ci/split-feature-tests.php \
--dir=tests/Feature \
--shard=${{ matrix.shard }} \
--shards=4 \
--weights=.github/feature-test-timings.json \
--weight-profile=mysql \
--summary="build/test-results/mysql-shard-${{ matrix.shard }}-summary.txt" \
> /tmp/mysql-shard-files
cat /tmp/mysql-shard-files
cp /tmp/mysql-shard-files "build/test-results/mysql-shard-${{ matrix.shard }}-files.txt"
set +e
timeout --foreground 60m xargs -a /tmp/mysql-shard-files \
vendor/bin/phpunit --testdox --testsuite feature \
--log-junit "build/test-results/mysql-shard-${{ matrix.shard }}.xml"
status=$?
set -e
if [ -f "build/test-results/mysql-shard-${{ matrix.shard }}.xml" ]; then
php scripts/ci/summarize-junit-times.php \
--input="build/test-results/mysql-shard-${{ matrix.shard }}.xml" \
--output="build/test-results/mysql-shard-${{ matrix.shard }}-timing.md" \
--csv="build/test-results/mysql-shard-${{ matrix.shard }}-timing.csv"
fi
exit "$status"
env:
DB_CONNECTION: mysql
DB_PORT: ${{ job.services.mysql.ports['3306'] }}
QUEUE_CONNECTION: redis
XDEBUG_MODE: off
- name: Upload MySQL feature timing report
if: always()
uses: actions/upload-artifact@v4
with:
name: feature-timing-mysql-${{ matrix.shard }}
path: build/test-results/mysql-shard-${{ matrix.shard }}*
- name: Upload laravel.log if MySQL tests fail
if: failure() || steps.mysql_suite.outcome == 'failure'
uses: actions/upload-artifact@v4
with:
name: laravel-log-mysql-${{ matrix.shard }}
path: vendor/orchestra/testbench-core/laravel/storage/logs/laravel.log
feature-postgresql:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
shard: [0, 1]
services:
postgres:
image: postgres
env:
POSTGRES_USER: root
POSTGRES_PASSWORD: password
POSTGRES_DB: testbench
ports:
- 5432:5432
redis:
image: redis
ports:
- 6379:6379
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 10
- name: Cache Composer packages
uses: actions/cache@v5
with:
path: vendor
key: ${{ runner.os }}-php-${{ hashFiles('**/composer.lock') }}
restore-keys: |
${{ runner.os }}-php-
- name: Install dependencies
run: composer install --prefer-dist --no-progress
- name: Export GitHub Actions test defaults
run: |
{
echo "APP_KEY=base64:i3g6f+dV8FfsIkcxqd7gbiPn2oXk5r00sTmdD6V5utI="
echo "DB_HOST=127.0.0.1"
echo "DB_DATABASE=testbench"
echo "DB_USERNAME=root"
echo "DB_PASSWORD=password"
echo "QUEUE_FAILED_DRIVER=null"
echo "REDIS_HOST=127.0.0.1"
echo "REDIS_PASSWORD="
echo "REDIS_PORT=${{ job.services.redis.ports['6379'] }}"
# Testbench defaults cache to array (per-process, in-memory). The V2
# TaskWatchdog throttle set by Tests\TestCase::setUp needs to be
# visible to the two queue:work worker processes so their wake()
# poll short-circuits — array cache means the workers never see the
# test's throttle and runPass races the test's DB state (originally
# diagnosed as the #427 PG "hang" and the MySQL Mode-B flakes).
# The local docker stack uses tests/.env.feature which already sets
# CACHE_DRIVER=redis for the same reason; mirror it here.
echo "CACHE_DRIVER=redis"
echo "CACHE_STORE=redis"
} >> "$GITHUB_ENV"
- name: Run feature test suite (PostgreSQL shard ${{ matrix.shard }})
id: pgsql_suite
timeout-minutes: 65
run: |
mkdir -p build/test-results
php scripts/ci/split-feature-tests.php \
--dir=tests/Feature \
--shard=${{ matrix.shard }} \
--shards=2 \
--weights=.github/feature-test-timings.json \
--weight-profile=postgresql \
--summary="build/test-results/pgsql-shard-${{ matrix.shard }}-summary.txt" \
> /tmp/pgsql-shard-files
cat /tmp/pgsql-shard-files
cp /tmp/pgsql-shard-files "build/test-results/pgsql-shard-${{ matrix.shard }}-files.txt"
set +e
timeout --foreground 60m xargs -a /tmp/pgsql-shard-files \
vendor/bin/phpunit --testdox --testsuite feature \
--log-junit "build/test-results/pgsql-shard-${{ matrix.shard }}.xml"
status=$?
set -e
if [ -f "build/test-results/pgsql-shard-${{ matrix.shard }}.xml" ]; then
php scripts/ci/summarize-junit-times.php \
--input="build/test-results/pgsql-shard-${{ matrix.shard }}.xml" \
--output="build/test-results/pgsql-shard-${{ matrix.shard }}-timing.md" \
--csv="build/test-results/pgsql-shard-${{ matrix.shard }}-timing.csv"
fi
exit "$status"
env:
DB_CONNECTION: pgsql
DB_PORT: ${{ job.services.postgres.ports['5432'] }}
QUEUE_CONNECTION: redis
XDEBUG_MODE: off
# Safety net kept after #427: libpq honors PGOPTIONS on every
# connection, so any runaway query or lock wait fails loudly inside
# the step budget instead of consuming the whole 60m timeout. The
# numbers are well above legitimate test cost (the suite runs in
# ~13m) but well below the step budget, so a future regression
# surfaces with the offending SQL in laravel.log rather than a
# silent hang.
PGOPTIONS: "-c statement_timeout=120000 -c lock_timeout=30000 -c idle_in_transaction_session_timeout=60000"
- name: Upload PostgreSQL feature timing report
if: always()
uses: actions/upload-artifact@v4
with:
name: feature-timing-postgresql-${{ matrix.shard }}
path: build/test-results/pgsql-shard-${{ matrix.shard }}*
- name: Upload laravel.log if PostgreSQL tests fail
if: failure() || steps.pgsql_suite.outcome == 'failure'
uses: actions/upload-artifact@v4
with:
name: laravel-log-postgresql-${{ matrix.shard }}
path: vendor/orchestra/testbench-core/laravel/storage/logs/laravel.log
coverage:
runs-on: ubuntu-latest
services:
redis:
image: redis
ports:
- 6379:6379
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 10
- name: Cache Composer packages
uses: actions/cache@v5
with:
path: vendor
key: ${{ runner.os }}-php-${{ hashFiles('**/composer.lock') }}
restore-keys: |
${{ runner.os }}-php-
- name: Install dependencies
run: composer install --prefer-dist --no-progress
- name: Create SQLite database
run: touch testbench.sqlite
- name: Export coverage test defaults
run: |
{
echo "APP_KEY=base64:i3g6f+dV8FfsIkcxqd7gbiPn2oXk5r00sTmdD6V5utI="
echo "REDIS_HOST=127.0.0.1"
echo "REDIS_PASSWORD="
echo "REDIS_PORT=${{ job.services.redis.ports['6379'] }}"
echo "QUEUE_FAILED_DRIVER=null"
} >> "$GITHUB_ENV"
- name: Code Coverage
run: vendor/bin/phpunit --testdox --coverage-clover=coverage.xml --testsuite unit
env:
DB_CONNECTION: sqlite
DB_DATABASE: testbench.sqlite
QUEUE_CONNECTION: sync
XDEBUG_MODE: coverage
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5
with:
files: ./coverage.xml
fail_ci_if_error: false
token: ${{ secrets.CODECOV_TOKEN }}
build:
runs-on: ubuntu-latest
needs:
- quality
- feature-mysql
- feature-postgresql
- coverage
if: always()
steps:
- name: Check required jobs
run: |
test "${{ needs.quality.result }}" = "success"
test "${{ needs.feature-mysql.result }}" = "success"
test "${{ needs.feature-postgresql.result }}" = "success"
test "${{ needs.coverage.result }}" = "success"