Skip to content

Commit afff03e

Browse files
haasonsaas authored and claude committed
Add Greptile-inspired settings: configurable timeouts, retries, language, file limits, auto-instructions
New config fields (all with sensible defaults, fully backward-compatible): - adapter_timeout_secs: HTTP timeout for LLM requests (was hardcoded 60/300s) - adapter_max_retries / adapter_retry_delay_ms: retry logic (was hardcoded 2/250ms) - file_change_limit: skip massive PRs exceeding N files - output_language: review output locale (e.g., "ja", "de") - include_fix_suggestions: toggle AI fix suggestions on/off - auto_detect_instructions: absorb .cursorrules, CLAUDE.md, agents.md automatically - feedback_suppression_threshold / feedback_suppression_margin: tune adaptive learning New CLI flags: --timeout, --max-retries, --file-change-limit, --output-language Implementation: - RetryConfig struct replaces hardcoded constants in adapters/common.rs - All 3 adapters (OpenAI, Anthropic, Ollama) now use configurable timeout/retry - Pipeline injects auto-detected instruction files into system prompt - Review guidance includes output language directive and fix suggestion toggle - Feedback suppression thresholds flow from config into filter logic - All new fields normalized with bounds (timeout: 5-600s, retries: 0-10, etc.) - 11 new tests covering normalization, guidance, and instruction detection Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent ee1ad36 commit afff03e

File tree

9 files changed

+422
-29
lines changed

9 files changed

+422
-29
lines changed

src/adapters/anthropic.rs

Lines changed: 12 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,7 @@ pub struct AnthropicAdapter {
1010
config: ModelConfig,
1111
api_key: String,
1212
base_url: String,
13+
retry_config: common::RetryConfig,
1314
}
1415

1516
#[derive(Serialize)]
@@ -61,15 +62,23 @@ impl AnthropicAdapter {
6162
.or_else(|| if is_local { Some(String::new()) } else { None })
6263
.context("Anthropic API key not found. Set ANTHROPIC_API_KEY environment variable or provide in config")?;
6364

65+
let default_timeout = if is_local { 300 } else { 60 };
66+
let timeout_secs = config.timeout_secs.unwrap_or(default_timeout);
6467
let client = Client::builder()
65-
.timeout(std::time::Duration::from_secs(if is_local { 300 } else { 60 }))
68+
.timeout(std::time::Duration::from_secs(timeout_secs))
6669
.build()?;
6770

71+
let retry_config = common::RetryConfig {
72+
max_retries: config.max_retries.unwrap_or(2),
73+
base_delay_ms: config.retry_delay_ms.unwrap_or(250),
74+
};
75+
6876
Ok(Self {
6977
client,
7078
config,
7179
api_key,
7280
base_url,
81+
retry_config,
7382
})
7483
}
7584

@@ -92,7 +101,7 @@ impl LLMAdapter for AnthropicAdapter {
92101
};
93102

94103
let url = format!("{}/messages", self.base_url);
95-
let response = common::send_with_retry("Anthropic", || {
104+
let response = common::send_with_retry_config("Anthropic", &self.retry_config, &mut || {
96105
self.client
97106
.post(&url)
98107
.header("x-api-key", &self.api_key)
@@ -153,6 +162,7 @@ mod tests {
153162
max_tokens: 100,
154163
openai_use_responses: None,
155164
adapter_override: None,
165+
..Default::default()
156166
}
157167
}
158168

src/adapters/common.rs

Lines changed: 31 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -5,21 +5,39 @@ use std::time::Duration;
55
use tokio::time::sleep;
66
use url::Url;
77

8-
/// Send an HTTP request with retry logic for transient failures.
9-
///
10-
/// Retries up to 2 times on retryable status codes (429, 5xx) or connection errors,
11-
/// with linear backoff starting at 250ms.
12-
pub async fn send_with_retry<F>(
8+
/// Default retry parameters.
9+
const DEFAULT_MAX_RETRIES: usize = 2;
10+
const DEFAULT_BASE_DELAY_MS: u64 = 250;
11+
12+
/// Configurable retry parameters for LLM adapter requests.
13+
#[derive(Debug, Clone)]
14+
pub struct RetryConfig {
15+
pub max_retries: usize,
16+
pub base_delay_ms: u64,
17+
}
18+
19+
impl Default for RetryConfig {
20+
fn default() -> Self {
21+
Self {
22+
max_retries: DEFAULT_MAX_RETRIES,
23+
base_delay_ms: DEFAULT_BASE_DELAY_MS,
24+
}
25+
}
26+
}
27+
28+
/// Send an HTTP request with configurable retry parameters.
29+
pub async fn send_with_retry_config<F>(
1330
adapter_name: &str,
14-
mut make_request: F,
31+
retry_config: &RetryConfig,
32+
make_request: &mut F,
1533
) -> Result<reqwest::Response>
1634
where
1735
F: FnMut() -> reqwest::RequestBuilder,
1836
{
19-
const MAX_RETRIES: usize = 2;
20-
const BASE_DELAY_MS: u64 = 250;
37+
let max_retries = retry_config.max_retries;
38+
let base_delay_ms = retry_config.base_delay_ms;
2139

22-
for attempt in 0..=MAX_RETRIES {
40+
for attempt in 0..=max_retries {
2341
match make_request().send().await {
2442
Ok(response) => {
2543
if response.status().is_success() {
@@ -28,8 +46,8 @@ where
2846

2947
let status = response.status();
3048
let body = response.text().await.unwrap_or_default();
31-
if is_retryable_status(status) && attempt < MAX_RETRIES {
32-
sleep(Duration::from_millis(BASE_DELAY_MS * (attempt as u64 + 1))).await;
49+
if is_retryable_status(status) && attempt < max_retries {
50+
sleep(Duration::from_millis(base_delay_ms * (attempt as u64 + 1))).await;
3351
continue;
3452
}
3553

@@ -43,8 +61,8 @@ where
4361
);
4462
}
4563
Err(err) => {
46-
if attempt < MAX_RETRIES {
47-
sleep(Duration::from_millis(BASE_DELAY_MS * (attempt as u64 + 1))).await;
64+
if attempt < max_retries {
65+
sleep(Duration::from_millis(base_delay_ms * (attempt as u64 + 1))).await;
4866
continue;
4967
}
5068
return Err(err.into());

src/adapters/llm.rs

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -12,6 +12,15 @@ pub struct ModelConfig {
1212
pub openai_use_responses: Option<bool>,
1313
#[serde(default)]
1414
pub adapter_override: Option<String>,
15+
/// Override HTTP timeout in seconds.
16+
#[serde(default)]
17+
pub timeout_secs: Option<u64>,
18+
/// Override max retries for transient failures.
19+
#[serde(default)]
20+
pub max_retries: Option<usize>,
21+
/// Override base delay between retries in milliseconds.
22+
#[serde(default)]
23+
pub retry_delay_ms: Option<u64>,
1524
}
1625

1726
impl Default for ModelConfig {
@@ -24,6 +33,9 @@ impl Default for ModelConfig {
2433
max_tokens: 4000,
2534
openai_use_responses: None,
2635
adapter_override: None,
36+
timeout_secs: None,
37+
max_retries: None,
38+
retry_delay_ms: None,
2739
}
2840
}
2941
}

src/adapters/ollama.rs

Lines changed: 11 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -9,6 +9,7 @@ pub struct OllamaAdapter {
99
client: Client,
1010
config: ModelConfig,
1111
base_url: String,
12+
retry_config: common::RetryConfig,
1213
}
1314

1415
// -- Chat API types (primary) --
@@ -100,14 +101,21 @@ impl OllamaAdapter {
100101
.clone()
101102
.unwrap_or_else(|| "http://localhost:11434".to_string());
102103

104+
let timeout_secs = config.timeout_secs.unwrap_or(300);
103105
let client = Client::builder()
104-
.timeout(std::time::Duration::from_secs(300))
106+
.timeout(std::time::Duration::from_secs(timeout_secs))
105107
.build()?;
106108

109+
let retry_config = common::RetryConfig {
110+
max_retries: config.max_retries.unwrap_or(2),
111+
base_delay_ms: config.retry_delay_ms.unwrap_or(250),
112+
};
113+
107114
Ok(Self {
108115
client,
109116
config,
110117
base_url,
118+
retry_config,
111119
})
112120
}
113121

@@ -190,7 +198,7 @@ impl LLMAdapter for OllamaAdapter {
190198
};
191199

192200
let url = format!("{}/api/chat", self.base_url);
193-
let response = common::send_with_retry("Ollama", || {
201+
let response = common::send_with_retry_config("Ollama", &self.retry_config, &mut || {
194202
self.client.post(&url).json(&chat_request)
195203
})
196204
.await
@@ -236,6 +244,7 @@ mod tests {
236244
max_tokens: 100,
237245
openai_use_responses: None,
238246
adapter_override: None,
247+
..Default::default()
239248
}
240249
}
241250

src/adapters/openai.rs

Lines changed: 13 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,7 @@ pub struct OpenAIAdapter {
1010
config: ModelConfig,
1111
api_key: String,
1212
base_url: String,
13+
retry_config: common::RetryConfig,
1314
}
1415

1516
#[derive(Serialize)]
@@ -105,15 +106,23 @@ impl OpenAIAdapter {
105106
"OpenAI API key not found. Set OPENAI_API_KEY environment variable or provide in config"
106107
})?;
107108

109+
let default_timeout = if is_local { 300 } else { 60 };
110+
let timeout_secs = config.timeout_secs.unwrap_or(default_timeout);
108111
let client = Client::builder()
109-
.timeout(std::time::Duration::from_secs(if is_local { 300 } else { 60 }))
112+
.timeout(std::time::Duration::from_secs(timeout_secs))
110113
.build()?;
111114

115+
let retry_config = common::RetryConfig {
116+
max_retries: config.max_retries.unwrap_or(2),
117+
base_delay_ms: config.retry_delay_ms.unwrap_or(250),
118+
};
119+
112120
Ok(Self {
113121
client,
114122
config,
115123
api_key,
116124
base_url,
125+
retry_config,
117126
})
118127
}
119128

@@ -169,7 +178,7 @@ impl OpenAIAdapter {
169178
};
170179

171180
let url = format!("{}/chat/completions", self.base_url);
172-
let response = common::send_with_retry("OpenAI", || {
181+
let response = common::send_with_retry_config("OpenAI", &self.retry_config, &mut || {
173182
self.client
174183
.post(&url)
175184
.header("Authorization", format!("Bearer {}", self.api_key))
@@ -211,7 +220,7 @@ impl OpenAIAdapter {
211220
};
212221

213222
let url = format!("{}/responses", self.base_url);
214-
let response = common::send_with_retry("OpenAI", || {
223+
let response = common::send_with_retry_config("OpenAI", &self.retry_config, &mut || {
215224
self.client
216225
.post(&url)
217226
.header("Authorization", format!("Bearer {}", self.api_key))
@@ -277,6 +286,7 @@ mod tests {
277286
max_tokens: 100,
278287
openai_use_responses: Some(false),
279288
adapter_override: None,
289+
..Default::default()
280290
}
281291
}
282292

0 commit comments

Comments
 (0)