Skip to content

Commit ecbd71c

Browse files
committed
Add CI workflow and enforce formatting
1 parent 3e66c53 commit ecbd71c

28 files changed

+1170
-795
lines changed

.github/workflows/ci.yml

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
name: CI
2+
3+
on:
4+
pull_request:
5+
push:
6+
branches:
7+
- main
8+
9+
env:
10+
CARGO_TERM_COLOR: always
11+
12+
jobs:
13+
lint:
14+
runs-on: ubuntu-latest
15+
steps:
16+
- uses: actions/checkout@v4
17+
- uses: dtolnay/rust-toolchain@stable
18+
with:
19+
components: rustfmt, clippy
20+
- uses: Swatinem/rust-cache@v2
21+
- name: Format
22+
run: cargo fmt -- --check
23+
- name: Clippy
24+
run: cargo clippy -- -D warnings
25+
26+
test:
27+
runs-on: ${{ matrix.os }}
28+
strategy:
29+
matrix:
30+
os: [ubuntu-latest, macos-latest, windows-latest]
31+
steps:
32+
- uses: actions/checkout@v4
33+
- uses: dtolnay/rust-toolchain@stable
34+
- uses: Swatinem/rust-cache@v2
35+
- name: Test
36+
run: cargo test

.github/workflows/diffscope.yml

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,24 @@ permissions:
77
contents: read
88
pull-requests: write
99

10+
concurrency:
11+
group: diffscope-${{ github.event.pull_request.number }}
12+
cancel-in-progress: true
13+
1014
jobs:
1115
review:
1216
runs-on: ubuntu-latest
17+
if: github.event.pull_request.head.repo.full_name == github.repository
1318
steps:
1419
- uses: actions/checkout@v4
1520
with:
1621
fetch-depth: 0
22+
ref: ${{ github.event.pull_request.head.sha }}
1723

1824
- name: Get PR diff
1925
id: diff
2026
run: |
27+
git fetch origin ${{ github.base_ref }} --depth=1
2128
git diff origin/${{ github.base_ref }}...HEAD > pr.diff
2229
2330
- name: Run DiffScope
@@ -33,15 +40,19 @@ jobs:
3340
script: |
3441
const fs = require('fs');
3542
const comments = JSON.parse(fs.readFileSync('comments.json', 'utf8'));
43+
const headSha = context.payload.pull_request.head.sha;
3644
3745
for (const comment of comments) {
46+
if (!comment.file_path || !comment.line_number || comment.line_number < 1) {
47+
continue;
48+
}
3849
await github.rest.pulls.createReviewComment({
3950
owner: context.repo.owner,
4051
repo: context.repo.repo,
4152
pull_number: context.issue.number,
4253
body: `**${comment.severity}**: ${comment.content}`,
43-
commit_id: context.sha,
54+
commit_id: headSha,
4455
path: comment.file_path,
4556
line: comment.line_number
4657
});
47-
}
58+
}

src/adapters/anthropic.rs

Lines changed: 36 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1+
use crate::adapters::llm::{LLMAdapter, LLMRequest, LLMResponse, ModelConfig, Usage};
12
use anyhow::{Context, Result};
23
use async_trait::async_trait;
34
use reqwest::{Client, StatusCode};
45
use serde::{Deserialize, Serialize};
56
use std::time::Duration;
67
use tokio::time::sleep;
7-
use crate::adapters::llm::{LLMAdapter, LLMRequest, LLMResponse, ModelConfig, Usage};
88

99
pub struct AnthropicAdapter {
1010
client: Client,
@@ -53,14 +53,16 @@ impl AnthropicAdapter {
5353
let api_key = config.api_key.clone()
5454
.or_else(|| std::env::var("ANTHROPIC_API_KEY").ok())
5555
.context("Anthropic API key not found. Set ANTHROPIC_API_KEY environment variable or provide in config")?;
56-
57-
let base_url = config.base_url.clone()
56+
57+
let base_url = config
58+
.base_url
59+
.clone()
5860
.unwrap_or_else(|| "https://api.anthropic.com/v1".to_string());
59-
61+
6062
let client = Client::builder()
6163
.timeout(std::time::Duration::from_secs(60))
6264
.build()?;
63-
65+
6466
Ok(Self {
6567
client,
6668
config,
@@ -109,38 +111,40 @@ impl AnthropicAdapter {
109111
#[async_trait]
110112
impl LLMAdapter for AnthropicAdapter {
111113
async fn complete(&self, request: LLMRequest) -> Result<LLMResponse> {
112-
let messages = vec![
113-
Message {
114-
role: "user".to_string(),
115-
content: request.user_prompt,
116-
},
117-
];
118-
114+
let messages = vec![Message {
115+
role: "user".to_string(),
116+
content: request.user_prompt,
117+
}];
118+
119119
let anthropic_request = AnthropicRequest {
120120
model: self.config.model_name.clone(),
121121
messages,
122122
max_tokens: request.max_tokens.unwrap_or(self.config.max_tokens),
123123
temperature: request.temperature.unwrap_or(self.config.temperature),
124124
system: request.system_prompt,
125125
};
126-
126+
127127
let url = format!("{}/messages", self.base_url);
128-
let response = self.send_with_retry(|| {
129-
self.client
130-
.post(&url)
131-
.header("x-api-key", &self.api_key)
132-
.header("anthropic-version", "2023-06-01")
133-
.header("anthropic-beta", "messages-2023-12-15")
134-
.header("Content-Type", "application/json")
135-
.json(&anthropic_request)
136-
})
137-
.await
138-
.context("Failed to send request to Anthropic")?;
139-
140-
let anthropic_response: AnthropicResponse = response.json().await
128+
let response = self
129+
.send_with_retry(|| {
130+
self.client
131+
.post(&url)
132+
.header("x-api-key", &self.api_key)
133+
.header("anthropic-version", "2023-06-01")
134+
.header("anthropic-beta", "messages-2023-12-15")
135+
.header("Content-Type", "application/json")
136+
.json(&anthropic_request)
137+
})
138+
.await
139+
.context("Failed to send request to Anthropic")?;
140+
141+
let anthropic_response: AnthropicResponse = response
142+
.json()
143+
.await
141144
.context("Failed to parse Anthropic response")?;
142-
143-
let content = anthropic_response.content
145+
146+
let content = anthropic_response
147+
.content
144148
.first()
145149
.map(|c| {
146150
// Verify it's a text content type
@@ -151,18 +155,19 @@ impl LLMAdapter for AnthropicAdapter {
151155
}
152156
})
153157
.unwrap_or_default();
154-
158+
155159
Ok(LLMResponse {
156160
content,
157161
model: anthropic_response.model,
158162
usage: Some(Usage {
159163
prompt_tokens: anthropic_response.usage.input_tokens,
160164
completion_tokens: anthropic_response.usage.output_tokens,
161-
total_tokens: anthropic_response.usage.input_tokens + anthropic_response.usage.output_tokens,
165+
total_tokens: anthropic_response.usage.input_tokens
166+
+ anthropic_response.usage.output_tokens,
162167
}),
163168
})
164169
}
165-
170+
166171
fn _model_name(&self) -> &str {
167172
&self.config.model_name
168173
}

src/adapters/llm.rs

Lines changed: 28 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -54,30 +54,37 @@ pub trait LLMAdapter: Send + Sync {
5454
pub fn create_adapter(config: &ModelConfig) -> Result<Box<dyn LLMAdapter>> {
5555
match config.model_name.as_str() {
5656
// Anthropic Claude models (all versions)
57-
name if name.starts_with("claude-") => {
58-
Ok(Box::new(crate::adapters::AnthropicAdapter::new(config.clone())?))
59-
}
57+
name if name.starts_with("claude-") => Ok(Box::new(
58+
crate::adapters::AnthropicAdapter::new(config.clone())?,
59+
)),
6060
// Legacy claude naming without dash
61-
name if name.starts_with("claude") => {
62-
Ok(Box::new(crate::adapters::AnthropicAdapter::new(config.clone())?))
63-
}
61+
name if name.starts_with("claude") => Ok(Box::new(crate::adapters::AnthropicAdapter::new(
62+
config.clone(),
63+
)?)),
6464
// OpenAI models
65-
name if name.starts_with("gpt-") => {
66-
Ok(Box::new(crate::adapters::OpenAIAdapter::new(config.clone())?))
67-
}
68-
name if name.starts_with("o1-") => {
69-
Ok(Box::new(crate::adapters::OpenAIAdapter::new(config.clone())?))
70-
}
65+
name if name.starts_with("gpt-") => Ok(Box::new(crate::adapters::OpenAIAdapter::new(
66+
config.clone(),
67+
)?)),
68+
name if name.starts_with("o1-") => Ok(Box::new(crate::adapters::OpenAIAdapter::new(
69+
config.clone(),
70+
)?)),
7171
// Ollama models
72-
name if name.starts_with("ollama:") => {
73-
Ok(Box::new(crate::adapters::OllamaAdapter::new(config.clone())?))
74-
}
75-
_name if config.base_url.as_ref().map_or(false, |u| u.contains("11434")) => {
76-
Ok(Box::new(crate::adapters::OllamaAdapter::new(config.clone())?))
72+
name if name.starts_with("ollama:") => Ok(Box::new(crate::adapters::OllamaAdapter::new(
73+
config.clone(),
74+
)?)),
75+
_name
76+
if config
77+
.base_url
78+
.as_ref()
79+
.map_or(false, |u| u.contains("11434")) =>
80+
{
81+
Ok(Box::new(crate::adapters::OllamaAdapter::new(
82+
config.clone(),
83+
)?))
7784
}
7885
// Default to OpenAI for unknown models
79-
_ => {
80-
Ok(Box::new(crate::adapters::OpenAIAdapter::new(config.clone())?))
81-
}
86+
_ => Ok(Box::new(crate::adapters::OpenAIAdapter::new(
87+
config.clone(),
88+
)?)),
8289
}
83-
}
90+
}

src/adapters/mod.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1+
pub mod anthropic;
12
pub mod llm;
2-
pub mod openai;
33
pub mod ollama;
4-
pub mod anthropic;
4+
pub mod openai;
55

6-
pub use openai::OpenAIAdapter;
6+
pub use anthropic::AnthropicAdapter;
77
pub use ollama::OllamaAdapter;
8-
pub use anthropic::AnthropicAdapter;
8+
pub use openai::OpenAIAdapter;

src/adapters/ollama.rs

Lines changed: 23 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1+
use crate::adapters::llm::{LLMAdapter, LLMRequest, LLMResponse, ModelConfig, Usage};
12
use anyhow::{Context, Result};
23
use async_trait::async_trait;
34
use reqwest::{Client, StatusCode};
45
use serde::{Deserialize, Serialize};
56
use std::time::Duration;
67
use tokio::time::sleep;
7-
use crate::adapters::llm::{LLMAdapter, LLMRequest, LLMResponse, ModelConfig, Usage};
88

99
pub struct OllamaAdapter {
1010
client: Client,
@@ -35,13 +35,15 @@ struct OllamaResponse {
3535

3636
impl OllamaAdapter {
3737
pub fn new(config: ModelConfig) -> Result<Self> {
38-
let base_url = config.base_url.clone()
38+
let base_url = config
39+
.base_url
40+
.clone()
3941
.unwrap_or_else(|| "http://localhost:11434".to_string());
40-
42+
4143
let client = Client::builder()
4244
.timeout(std::time::Duration::from_secs(300))
4345
.build()?;
44-
46+
4547
Ok(Self {
4648
client,
4749
config,
@@ -89,10 +91,12 @@ impl OllamaAdapter {
8991
#[async_trait]
9092
impl LLMAdapter for OllamaAdapter {
9193
async fn complete(&self, request: LLMRequest) -> Result<LLMResponse> {
92-
let model_name = self.config.model_name
94+
let model_name = self
95+
.config
96+
.model_name
9397
.strip_prefix("ollama:")
9498
.unwrap_or(&self.config.model_name);
95-
99+
96100
let ollama_request = OllamaRequest {
97101
model: model_name.to_string(),
98102
prompt: request.user_prompt,
@@ -101,34 +105,34 @@ impl LLMAdapter for OllamaAdapter {
101105
num_predict: request.max_tokens.unwrap_or(self.config.max_tokens),
102106
stream: false,
103107
};
104-
108+
105109
let url = format!("{}/api/generate", self.base_url);
106-
let response = self.send_with_retry(|| {
107-
self.client
108-
.post(&url)
109-
.json(&ollama_request)
110-
})
111-
.await
112-
.context("Failed to send request to Ollama")?;
113-
114-
let ollama_response: OllamaResponse = response.json().await
110+
let response = self
111+
.send_with_retry(|| self.client.post(&url).json(&ollama_request))
112+
.await
113+
.context("Failed to send request to Ollama")?;
114+
115+
let ollama_response: OllamaResponse = response
116+
.json()
117+
.await
115118
.context("Failed to parse Ollama response")?;
116-
119+
117120
Ok(LLMResponse {
118121
content: ollama_response.response,
119122
model: ollama_response.model,
120123
usage: if ollama_response.done {
121124
Some(Usage {
122125
prompt_tokens: ollama_response.prompt_eval_count.unwrap_or(0),
123126
completion_tokens: ollama_response.eval_count.unwrap_or(0),
124-
total_tokens: ollama_response.prompt_eval_count.unwrap_or(0) + ollama_response.eval_count.unwrap_or(0),
127+
total_tokens: ollama_response.prompt_eval_count.unwrap_or(0)
128+
+ ollama_response.eval_count.unwrap_or(0),
125129
})
126130
} else {
127131
None
128132
},
129133
})
130134
}
131-
135+
132136
fn _model_name(&self) -> &str {
133137
&self.config.model_name
134138
}

0 commit comments

Comments (0)