# .github/workflows/test_evaluation_function.yml
name: Endpoint Validation Test

on:
  workflow_call:
    inputs:
      eval_function:
        description: 'Evaluation Function Name'
        required: true
        type: string
      sql_limit:
        description: 'Max number of records to fetch'
        required: false
        type: number
        default: 1000
    secrets:
      # Explicitly declare the secrets the reusable workflow needs.
      # Note: workflow_call secret declarations accept only description
      # and required; a `type` key is not valid here.
      TEST_API_ENDPOINT:
        description: 'API Endpoint URL to test'
        required: false
      DB_USER:
        required: false
      DB_PASSWORD:
        required: false
      DB_HOST:
        required: false
      DB_PORT:
        required: false
      DB_NAME:
        required: false
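# Example caller (a hypothetical sketch, not part of this file; the caller
# path and the eval_function value shown here are assumptions):
#
# jobs:
#   validate:
#     uses: ./.github/workflows/test_evaluation_function.yml
#     with:
#       eval_function: my_eval_function   # hypothetical function name
#       sql_limit: 500
#     secrets:
#       TEST_API_ENDPOINT: ${{ secrets.TEST_API_ENDPOINT }}
#       DB_USER: ${{ secrets.DB_USER }}
#       DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
#       DB_HOST: ${{ secrets.DB_HOST }}
#       DB_PORT: ${{ secrets.DB_PORT }}
#       DB_NAME: ${{ secrets.DB_NAME }}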
jobs:
  run_test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v4

      - name: Set up Python Environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Dependencies
        run: pip install -r requirements.txt
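      # The report-parsing steps below rely on jq and bc, which ship with
      # GitHub's ubuntu-latest runner image, so no extra install step is
      # needed for them.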
      - name: Run Test Script and Capture Results
        id: run_script
        env:
          # --- Database Secrets (Loaded from GitHub Secrets) ---
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_PORT: ${{ secrets.DB_PORT }}
          DB_NAME: ${{ secrets.DB_NAME }}
          # --- Configuration ---
          LOG_LEVEL: DEBUG
          SQL_LIMIT: ${{ inputs.sql_limit }}
        run: |
          # 1. Prepare the event payload as JSON for the lambda_handler.
          #    The endpoint comes from the TEST_API_ENDPOINT secret declared
          #    above (this workflow defines no `endpoint` input).
          EVENT_PAYLOAD='{
            "endpoint": "${{ secrets.TEST_API_ENDPOINT }}",
            "eval_function_name": "${{ inputs.eval_function }}",
            "grade_params_json": "",
            "sql_limit": ${{ inputs.sql_limit }}
          }'

          # 2. Feed the payload to the script on stdin (<<< is a bash
          #    here-string) and write the final JSON summary to a file.
          python3 test_endpoint.py <<< "$EVENT_PAYLOAD" > report_data.json

          # 3. Read the output file to get the JSON data.
          REPORT_DATA=$(cat report_data.json)

          # 4. Extract key metrics and expose them as step outputs for later steps.
          ERROR_COUNT=$(echo "$REPORT_DATA" | jq -r '.number_of_errors')
          echo "error_count=$ERROR_COUNT" >> "$GITHUB_OUTPUT"
          echo "csv_filename=$(echo "$REPORT_DATA" | jq -r '.csv_filename')" >> "$GITHUB_OUTPUT"

          # 5. Annotate the run if errors were found. The job itself is
          #    failed by the final gate step, after the summary and artifact
          #    steps have had a chance to run.
          if [ "$ERROR_COUNT" -gt 0 ]; then
            echo "::error file=test_endpoint.py::Test completed with $ERROR_COUNT errors."
          fi
      - name: 📊 Create Job Summary Report
        shell: bash
        run: |
          REPORT_DATA=$(cat report_data.json)
          PASSES=$(echo "$REPORT_DATA" | jq -r '.pass_count')
          TOTAL=$(echo "$REPORT_DATA" | jq -r '.total_count')
          ERRORS=$(echo "$REPORT_DATA" | jq -r '.number_of_errors')

          # Guard against division by zero when no records were fetched.
          if [ "$TOTAL" -gt 0 ]; then
            PASS_RATE=$(echo "scale=2; $PASSES / $TOTAL * 100" | bc -l)
          else
            PASS_RATE="0"
          fi

          STATUS_EMOJI="✅"
          if [ "$ERRORS" -gt 0 ]; then
            STATUS_EMOJI="❌"
          fi

          # Write the markdown report to the special Job Summary file.
          # The endpoint URL is a secret, so its value is deliberately
          # not echoed into the summary.
          {
            echo "## ${STATUS_EMOJI} Endpoint Validation Report"
            echo "---"
            echo "**Endpoint:** (configured via the TEST_API_ENDPOINT secret)"
            echo "**Evaluation Function:** ${{ inputs.eval_function }}"
            echo ""
            echo "| Metric | Value |"
            echo "| :--- | :--- |"
            echo "| **Total Tests** | ${TOTAL} |"
            echo "| **Passed** | ${PASSES} |"
            echo "| **Failed** | **${ERRORS}** |"
            echo "| **Pass Rate** | ${PASS_RATE}% |"
            echo "---"
            if [ "$ERRORS" -gt 0 ]; then
              echo "⚠ **${ERRORS} Mismatches/Errors Found.** Detailed report attached below."
            else
              echo "🎉 All tests passed successfully!"
            fi
          } >> "$GITHUB_STEP_SUMMARY"
      - name: 📦 Upload Error CSV Artifact
        # jq -r emits the literal string "null" when the key is absent,
        # so guard against both empty and "null" values.
        if: steps.run_script.outputs.csv_filename != '' && steps.run_script.outputs.csv_filename != 'null'
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.eval_function }}_error_report
          path: ${{ steps.run_script.outputs.csv_filename }}
          retention-days: 7
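      # Final gate (a sketch added here, not in the original steps): fail
      # the job when errors were recorded. It runs last so the summary and
      # artifact steps above always get a chance to complete first.
      - name: Fail Job on Errors
        if: steps.run_script.outputs.error_count > 0
        run: exit 1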