Skip to content

Commit 11d55d8

Browse files
committed
actions: experimental queries to detect CWE-1427
Add detection for prompt injection vulnerabilities (CWE-1427) in GitHub Actions workflows that use AI inference actions.

New queries:
- PromptInjectionCritical.ql: Detects user-controlled data flowing into AI prompts in privileged contexts (severity 9.0)
- PromptInjectionMedium.ql: Detects prompt injection on non-privileged but externally triggerable events like pull_request (severity 5.0)

New library:
- PromptInjectionQuery.qll: Taint tracking from remote flow sources to MaD-defined prompt-injection sinks

MaD model (prompt_injection_sinks.model.yml):
- 30+ AI actions including actions/ai-inference, anthropics/claude-code-action, google-github-actions/run-gemini-cli, warpdotdev/oz-agent-action, and others

ControlChecks.qll: Add 'prompt-injection' to control check categories
1 parent 16683ae commit 11d55d8

25 files changed

+688
-1
lines changed

actions/ql/lib/codeql/actions/security/ControlChecks.qll

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,8 @@ string any_category() {
55
[
66
"untrusted-checkout", "output-clobbering", "envpath-injection", "envvar-injection",
77
"command-injection", "argument-injection", "code-injection", "cache-poisoning",
8-
"untrusted-checkout-toctou", "artifact-poisoning", "artifact-poisoning-toctou"
8+
"untrusted-checkout-toctou", "artifact-poisoning", "artifact-poisoning-toctou",
9+
"prompt-injection"
910
]
1011
}
1112

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
/**
2+
* Provides classes and predicates for detecting prompt injection vulnerabilities
3+
* in GitHub Actions workflows that use AI inference actions.
4+
*
5+
* This library identifies:
6+
* - CWE-1427: User-controlled data flowing into AI model prompts without sanitization
7+
*/
8+
9+
private import actions
10+
private import codeql.actions.TaintTracking
11+
private import codeql.actions.dataflow.ExternalFlow
12+
import codeql.actions.dataflow.FlowSources
13+
import codeql.actions.DataFlow
14+
import codeql.actions.security.ControlChecks
15+
16+
/**
17+
* A sink for prompt injection vulnerabilities (CWE-1427).
18+
* Defined entirely through MaD extensible `actionsSinkModel` with kind `prompt-injection`.
19+
*/
20+
class PromptInjectionSink extends DataFlow::Node {
21+
PromptInjectionSink() { madSink(this, "prompt-injection") }
22+
}
23+
24+
/**
 * User-controlled data carried in the `client_payload` of a
 * `repository_dispatch` event. The payload can be set by anyone with write
 * access to the repository or through the GitHub API, so it is a potential
 * vector for injection attacks.
 */
class RepositoryDispatchClientPayloadSource extends RemoteFlowSource {
  string eventName;

  RepositoryDispatchClientPayloadSource() {
    eventName = "repository_dispatch" and
    exists(Expression expr |
      expr = this.asExpr() and
      expr.getATriggerEvent().getName() = eventName and
      expr.getExpression().matches("github.event.client_payload%")
    )
  }

  override string getSourceType() { result = "client_payload" }

  override string getEventName() { result = eventName }
}
45+
46+
/**
 * Gets an event under which `sink` executes in a privileged context and is
 * not guarded by any `ControlCheck` for the `prompt-injection` category.
 */
Event getRelevantEventForSink(DataFlow::Node sink) {
  inPrivilegedContext(sink.asExpr(), result) and
  not any(ControlCheck check).protects(sink.asExpr(), result, "prompt-injection")
}
54+
55+
/**
 * Gets an event relevant for prompt injection at `sink`: either a privileged,
 * unguarded event (see `getRelevantEventForSink`), or a `repository_dispatch`
 * trigger of the sink's enclosing job, which is externally reachable through
 * the GitHub API.
 */
Event getRelevantEventForPromptInjection(DataFlow::Node sink) {
  result = getRelevantEventForSink(sink)
  or
  result.getName() = "repository_dispatch" and
  exists(LocalJob enclosing |
    enclosing = sink.asExpr().getEnclosingJob() and
    result = enclosing.getATriggerEvent()
  )
}
68+
69+
/**
 * Holds if a critical-severity prompt injection path exists from `source` to
 * `sink` under the triggering `event`.
 */
predicate criticalSeverityPromptInjection(
  PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
) {
  event = getRelevantEventForPromptInjection(sink.getNode()) and
  // The source must originate from the same event that reaches the sink.
  event.getName() = source.getNode().(RemoteFlowSource).getEventName() and
  PromptInjectionFlow::flowPath(source, sink)
}
79+
80+
/**
 * Gets an externally triggerable, non-privileged event for `sink` that is not
 * guarded by a `prompt-injection` control check. This is broader than
 * `getRelevantEventForSink`: it includes events like `pull_request`, where an
 * attacker can still control event properties (PR title, body, branch name)
 * that flow into AI prompts. `repository_dispatch` is excluded because it is
 * reported at critical severity instead.
 */
Event getRelevantEventForMediumSeverity(DataFlow::Node sink) {
  result = sink.asExpr().getEnclosingJob().(LocalJob).getATriggerEvent() and
  result.isExternallyTriggerable() and
  not result.getName() = "repository_dispatch" and
  not inPrivilegedContext(sink.asExpr(), result) and
  not any(ControlCheck check).protects(sink.asExpr(), result, "prompt-injection")
}
97+
98+
/**
 * Holds if a medium-severity prompt injection path exists from `source` to
 * `sink` under `event` — a non-privileged but externally triggerable event
 * (e.g. `pull_request`) whose attacker-controlled properties flow into AI
 * prompts.
 */
predicate mediumSeverityPromptInjection(
  PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
) {
  event = getRelevantEventForMediumSeverity(sink.getNode()) and
  // The source must originate from the same event that reaches the sink.
  event.getName() = source.getNode().(RemoteFlowSource).getEventName() and
  PromptInjectionFlow::flowPath(source, sink)
}
110+
111+
/**
112+
* A taint-tracking configuration for unsafe user input
113+
* that is used to construct AI prompts (CWE-1427).
114+
*/
115+
private module PromptInjectionConfig implements DataFlow::ConfigSig {
116+
predicate isSource(DataFlow::Node source) { source instanceof RemoteFlowSource }
117+
118+
predicate isSink(DataFlow::Node sink) { sink instanceof PromptInjectionSink }
119+
120+
predicate observeDiffInformedIncrementalMode() { any() }
121+
122+
Location getASelectedSinkLocation(DataFlow::Node sink) {
123+
result = sink.getLocation()
124+
or
125+
result = getRelevantEventForPromptInjection(sink).getLocation()
126+
}
127+
}
128+
129+
/** Tracks flow of unsafe user input that is used to construct AI prompts. */
130+
module PromptInjectionFlow = TaintTracking::Global<PromptInjectionConfig>;
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
extensions:
2+
- addsTo:
3+
pack: codeql/actions-all
4+
extensible: actionsSinkModel
5+
# AI actions whose prompt/input parameters accept user-controllable data.
6+
# source: https://boostsecurityio.github.io/lotp/
7+
# source: https://github.com/marketplace?type=actions&category=ai-assisted
8+
data:
9+
# === GitHub official AI actions ===
10+
- ["actions/ai-inference", "*", "input.prompt", "prompt-injection", "manual"]
11+
- ["actions/ai-inference", "*", "input.system-prompt", "prompt-injection", "manual"]
12+
- ["github/ai-moderator", "*", "input.prompt", "prompt-injection", "manual"]
13+
- ["github/ai-moderator", "*", "input.custom-instructions", "prompt-injection", "manual"]
14+
# === Anthropic ===
15+
- ["anthropics/claude-code-action", "*", "input.prompt", "prompt-injection", "manual"]
16+
- ["anthropics/claude-code-action", "*", "input.direct_prompt", "prompt-injection", "manual"]
17+
- ["anthropics/claude-code-action", "*", "input.custom_instructions", "prompt-injection", "manual"]
18+
# === Google ===
19+
- ["google/gemini-code-assist-action", "*", "input.prompt", "prompt-injection", "manual"]
20+
- ["google-gemini/code-assist-action", "*", "input.prompt", "prompt-injection", "manual"]
21+
# === OpenAI / GPT ===
22+
- ["openai/chat-completion-action", "*", "input.prompt", "prompt-injection", "manual"]
23+
- ["openai/chat-completion-action", "*", "input.messages", "prompt-injection", "manual"]
24+
- ["di-sukharev/opencommit", "*", "input.prompt", "prompt-injection", "manual"]
25+
# === Community AI actions (marketplace) ===
26+
- ["quixio/quix-streams-ci-ai-review", "*", "input.prompt", "prompt-injection", "manual"]
27+
- ["rubberduck-ai/rubberduck-review-action", "*", "input.prompt", "prompt-injection", "manual"]
28+
- ["coderabbitai/ai-pr-reviewer", "*", "input.prompt", "prompt-injection", "manual"]
29+
- ["coderabbitai/ai-pr-reviewer", "*", "input.system_message", "prompt-injection", "manual"]
30+
- ["platisd/openai-pr-description", "*", "input.prompt", "prompt-injection", "manual"]
31+
- ["CodiumAI/pr-agent", "*", "input.prompt", "prompt-injection", "manual"]
32+
- ["arcee-ai/agent-action", "*", "input.prompt", "prompt-injection", "manual"]
33+
- ["langchain-ai/langsmith-action", "*", "input.prompt", "prompt-injection", "manual"]
34+
- ["abirismyname/create-discussion-with-ai", "*", "input.prompt", "prompt-injection", "manual"]
35+
- ["yousefed/ai-action", "*", "input.prompt", "prompt-injection", "manual"]
36+
- ["nickscamara/openai-github-action", "*", "input.prompt", "prompt-injection", "manual"]
37+
- ["austenstone/openai-completion-action", "*", "input.prompt", "prompt-injection", "manual"]
38+
- ["joshspicer/gpt-review", "*", "input.prompt", "prompt-injection", "manual"]
39+
- ["github/copilot-text-inference", "*", "input.prompt", "prompt-injection", "manual"]
40+
# === Google (GitHub Actions org) ===
41+
- ["google-github-actions/run-gemini-cli", "*", "input.prompt", "prompt-injection", "manual"]
42+
# === Warp ===
43+
- ["warpdotdev/oz-agent-action", "*", "input.prompt", "prompt-injection", "manual"]
44+
# === Generic AI action patterns (common parameter names) ===
45+
- ["togethercomputer/together-action", "*", "input.prompt", "prompt-injection", "manual"]
46+
- ["huggingface/inference-action", "*", "input.prompt", "prompt-injection", "manual"]
47+
- ["replicate/action", "*", "input.prompt", "prompt-injection", "manual"]
Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
## Overview

Passing user-controlled data into the prompt of an AI inference action allows an attacker to hijack the AI's behavior through **prompt injection**. Any workflow that feeds external input — issue titles, PR bodies, comments, or `repository_dispatch` payloads — directly into an AI prompt without sanitization is vulnerable to this class of attack.

When the AI action runs with access to secrets, write permissions, or code execution capabilities, a successful prompt injection can lead to secret exfiltration, unauthorized repository modifications, malicious package publication, or arbitrary command execution within the CI/CD environment.

## Recommendation

Never pass user-controlled data directly into AI prompt parameters. Instead:

- **Sanitize and truncate** user input before including it in prompts. Strip control characters and limit length.
- **Use environment variables** with shell-native interpolation (e.g. `$TITLE`, not `${{ ... }}`) to prevent expression injection.
- **Restrict workflow permissions** to the minimum required (e.g. `issues: write`, `models: read` only).
- **Use deployment environments** with required reviewers for workflows that invoke AI actions on external input.
- **Validate AI output** before using it in subsequent steps — treat AI responses as untrusted data.

## Example

### Incorrect Usage

The following example passes unsanitized issue data directly into an AI prompt. An attacker can craft an issue title containing hidden instructions that cause the AI to ignore its system prompt, exfiltrate secrets via its response, or produce output that compromises downstream steps:

```yaml
on:
  issues:
    types: [opened]

jobs:
  summary:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      models: read
    steps:
      - name: Run AI inference
        uses: actions/ai-inference@v1
        with:
          prompt: |
            Summarize the following GitHub issue:
            Title: ${{ github.event.issue.title }}
            Body: ${{ github.event.issue.body }}
```

### Correct Usage

The following example sanitizes and truncates user input before passing it to the AI, and uses environment variables to prevent expression injection:

```yaml
on:
  issues:
    types: [opened]

jobs:
  summary:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      models: read
    steps:
      - name: Sanitize input
        id: sanitize
        run: |
          SAFE_TITLE=$(echo "$TITLE" | head -c 200 | tr -dc '[:print:]')
          echo "title=$SAFE_TITLE" >> $GITHUB_OUTPUT
        env:
          TITLE: ${{ github.event.issue.title }}

      - name: Run AI inference
        uses: actions/ai-inference@v1
        with:
          prompt: |
            Summarize the following GitHub issue title (user input has been sanitized):
            Title: ${{ steps.sanitize.outputs.title }}
```

## References

- Common Weakness Enumeration: [CWE-1427](https://cwe.mitre.org/data/definitions/1427.html).
- [OWASP LLM01: Prompt Injection](https://genai.owasp.org/llmrisk/llm01-prompt-injection/).
- GitHub Docs: [Security hardening for GitHub Actions](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions).
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
/**
2+
* @name Prompt injection from user-controlled Actions input
3+
* @description User-controlled data flowing into AI prompts in a privileged context
4+
* may allow attackers to manipulate AI behavior through prompt injection.
5+
* @kind path-problem
6+
* @problem.severity error
7+
* @security-severity 9.0
8+
* @precision high
9+
* @id actions/prompt-injection/critical
10+
* @tags actions
11+
* security
12+
* experimental
13+
* external/cwe/cwe-1427
14+
*/
15+
16+
import actions
17+
import codeql.actions.security.PromptInjectionQuery
18+
import PromptInjectionFlow::PathGraph
19+
20+
from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
21+
where criticalSeverityPromptInjection(source, sink, event)
22+
select sink.getNode(), source, sink,
23+
"Potential prompt injection in $@, which may be controlled by an external user ($@).", sink,
24+
sink.getNode().asExpr().(Expression).getRawExpression(), event, event.getName()
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
## Overview

Passing user-controlled data into the prompt of an AI inference action on non-privileged but externally triggerable events such as `pull_request` allows an attacker to manipulate AI behavior through **prompt injection**. While the `pull_request` event does not grant write access to the base repository by default, the AI action may still reveal sensitive information, produce misleading output, or influence downstream processes that trust the AI's response.

This is a lower-severity variant of prompt injection (compared to privileged contexts like `issues`, `issue_comment`, or `pull_request_target`) because the attacker's ability to exploit the injection is limited by the reduced permissions of the triggering event.

## Recommendation

Apply the same mitigations as for critical-severity prompt injection:

- **Sanitize and truncate** user input before including it in prompts.
- **Use environment variables** with shell-native interpolation instead of `${{ }}` expression syntax.
- **Restrict workflow permissions** to the minimum required.
- **Validate AI output** before using it in subsequent steps.

## Example

### Incorrect Usage

The following example passes the pull request title directly into an AI prompt on the `pull_request` event:

```yaml
on:
  pull_request:
    types: [opened]

jobs:
  analyze:
    runs-on: ubuntu-latest
    steps:
      - name: AI analysis
        uses: actions/ai-inference@v1
        with:
          prompt: |
            Analyze this PR title:
            ${{ github.event.pull_request.title }}
```

### Correct Usage

The following example sanitizes the PR title before passing it to the AI:

```yaml
on:
  pull_request:
    types: [opened]

jobs:
  analyze:
    runs-on: ubuntu-latest
    steps:
      - name: Sanitize input
        id: sanitize
        run: |
          SAFE_TITLE=$(echo "$TITLE" | head -c 200 | tr -dc '[:print:]')
          echo "title=$SAFE_TITLE" >> $GITHUB_OUTPUT
        env:
          TITLE: ${{ github.event.pull_request.title }}

      - name: AI analysis
        uses: actions/ai-inference@v1
        with:
          prompt: |
            Analyze this PR title (sanitized):
            ${{ steps.sanitize.outputs.title }}
```

## References

- Common Weakness Enumeration: [CWE-1427](https://cwe.mitre.org/data/definitions/1427.html).
- [OWASP LLM01: Prompt Injection](https://genai.owasp.org/llmrisk/llm01-prompt-injection/).
- GitHub Docs: [Security hardening for GitHub Actions](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions).
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
/**
2+
* @name Prompt injection from user-controlled Actions input (medium severity)
3+
* @description User-controlled data flowing into AI prompts on non-privileged
4+
* but externally triggerable events (e.g. pull_request) may allow
5+
* attackers to manipulate AI behavior through prompt injection.
6+
* @kind path-problem
7+
* @problem.severity warning
8+
* @security-severity 5.0
9+
* @precision medium
10+
* @id actions/prompt-injection/medium
11+
* @tags actions
12+
* security
13+
* experimental
14+
* external/cwe/cwe-1427
15+
*/
16+
17+
import actions
18+
import codeql.actions.security.PromptInjectionQuery
19+
import PromptInjectionFlow::PathGraph
20+
21+
from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink, Event event
22+
where mediumSeverityPromptInjection(source, sink, event)
23+
select sink.getNode(), source, sink,
24+
"Potential prompt injection in $@, which may be controlled by an external user ($@).", sink,
25+
sink.getNode().asExpr().(Expression).getRawExpression(), event, event.getName()

0 commit comments

Comments
 (0)