Skip to content

Commit 037562e

Browse files
haasonsaas and claude committed
fix: apply cargo fmt formatting to storage backend files
Fixes CI lint failures from formatting inconsistencies in the PostgreSQL storage backend and related modules. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent e84e353 commit 037562e

File tree

4 files changed

+182
-93
lines changed

4 files changed

+182
-93
lines changed

src/server/api.rs

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,8 +94,16 @@ impl ListEventsParams {
9494
source: self.source,
9595
model: self.model,
9696
status: self.status,
97-
time_from: self.time_from.and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok().map(|t| t.with_timezone(&chrono::Utc))),
98-
time_to: self.time_to.and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok().map(|t| t.with_timezone(&chrono::Utc))),
97+
time_from: self.time_from.and_then(|s| {
98+
chrono::DateTime::parse_from_rfc3339(&s)
99+
.ok()
100+
.map(|t| t.with_timezone(&chrono::Utc))
101+
}),
102+
time_to: self.time_to.and_then(|s| {
103+
chrono::DateTime::parse_from_rfc3339(&s)
104+
.ok()
105+
.map(|t| t.with_timezone(&chrono::Utc))
106+
}),
99107
github_repo: self.github_repo,
100108
limit: self.limit,
101109
offset: self.offset,
@@ -625,7 +633,14 @@ pub async fn submit_feedback(
625633
AppState::save_reviews_async(&state);
626634

627635
// Persist feedback to storage backend
628-
let _ = state.storage.update_comment_feedback(&id, &comment_id_for_storage, if is_accepted { "accept" } else { "reject" }).await;
636+
let _ = state
637+
.storage
638+
.update_comment_feedback(
639+
&id,
640+
&comment_id_for_storage,
641+
if is_accepted { "accept" } else { "reject" },
642+
)
643+
.await;
629644

630645
// Record in convention store for learned patterns
631646
let config = state.config.read().await;

src/server/state.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,10 @@ impl AppState {
185185
if pg.is_empty().await? && storage_path.exists() {
186186
let json_reviews = Self::load_reviews_from_disk(&storage_path);
187187
if !json_reviews.is_empty() {
188-
info!("Migrating {} reviews from JSON to PostgreSQL...", json_reviews.len());
188+
info!(
189+
"Migrating {} reviews from JSON to PostgreSQL...",
190+
json_reviews.len()
191+
);
189192
for (_id, session) in &json_reviews {
190193
if let Err(e) = pg.save_review(session).await {
191194
tracing::warn!("Failed to migrate review {}: {}", session.id, e);

src/server/storage_json.rs

Lines changed: 64 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -106,12 +106,7 @@ impl StorageBackend for JsonStorageBackend {
106106
list.sort_by(|a, b| b.started_at.cmp(&a.started_at));
107107
let offset = offset as usize;
108108
let limit = limit as usize;
109-
Ok(list
110-
.into_iter()
111-
.skip(offset)
112-
.take(limit)
113-
.cloned()
114-
.collect())
109+
Ok(list.into_iter().skip(offset).take(limit).cloned().collect())
115110
}
116111

117112
async fn delete_review(&self, id: &str) -> anyhow::Result<()> {
@@ -143,16 +138,17 @@ impl StorageBackend for JsonStorageBackend {
143138
.as_ref()
144139
.map_or(true, |f| e.model.eq_ignore_ascii_case(f));
145140
let status_ok = filters.status.as_ref().map_or(true, |f| {
146-
e.event_type
147-
.eq_ignore_ascii_case(&format!("review.{}", f))
141+
e.event_type.eq_ignore_ascii_case(&format!("review.{}", f))
148142
});
149143
// Time filters (best-effort for JSON backend using created_at if available)
150-
let time_from_ok = filters.time_from.as_ref().map_or(true, |from| {
151-
e.created_at.map_or(true, |t| t >= *from)
152-
});
153-
let time_to_ok = filters.time_to.as_ref().map_or(true, |to| {
154-
e.created_at.map_or(true, |t| t <= *to)
155-
});
144+
let time_from_ok = filters
145+
.time_from
146+
.as_ref()
147+
.map_or(true, |from| e.created_at.map_or(true, |t| t >= *from));
148+
let time_to_ok = filters
149+
.time_to
150+
.as_ref()
151+
.map_or(true, |to| e.created_at.map_or(true, |t| t <= *to));
156152
source_ok && model_ok && status_ok && time_from_ok && time_to_ok
157153
})
158154
.collect();
@@ -176,9 +172,16 @@ impl StorageBackend for JsonStorageBackend {
176172
let events = self.list_events(filters).await?;
177173

178174
let total = events.len() as i64;
179-
let completed = events.iter().filter(|e| e.event_type == "review.completed").count() as i64;
175+
let completed = events
176+
.iter()
177+
.filter(|e| e.event_type == "review.completed")
178+
.count() as i64;
180179
let failed = total - completed;
181-
let error_rate = if total > 0 { failed as f64 / total as f64 } else { 0.0 };
180+
let error_rate = if total > 0 {
181+
failed as f64 / total as f64
182+
} else {
183+
0.0
184+
};
182185

183186
let total_tokens: i64 = events.iter().filter_map(|e| e.tokens_total).sum::<usize>() as i64;
184187
let avg_duration_ms = if total > 0 {
@@ -198,7 +201,9 @@ impl StorageBackend for JsonStorageBackend {
198201
let mut durations: Vec<u64> = events.iter().map(|e| e.duration_ms).collect();
199202
durations.sort();
200203
let percentile = |p: f64| -> i64 {
201-
if durations.is_empty() { return 0; }
204+
if durations.is_empty() {
205+
return 0;
206+
}
202207
let idx = ((p / 100.0) * (durations.len() as f64 - 1.0)).round() as usize;
203208
durations[idx.min(durations.len() - 1)] as i64
204209
};
@@ -210,33 +215,64 @@ impl StorageBackend for JsonStorageBackend {
210215
entry.0 += 1;
211216
entry.1 += e.duration_ms as f64;
212217
entry.2 += e.tokens_total.unwrap_or(0) as i64;
213-
if let Some(s) = e.overall_score { entry.3.push(s); }
218+
if let Some(s) = e.overall_score {
219+
entry.3.push(s);
220+
}
214221
}
215-
let by_model: Vec<ModelStats> = model_map.into_iter().map(|(model, (count, dur, tok, scores))| {
216-
let avg_s = if scores.is_empty() { None } else { Some(scores.iter().sum::<f32>() as f64 / scores.len() as f64) };
217-
ModelStats { model, count, avg_duration_ms: dur / count as f64, total_tokens: tok, avg_score: avg_s }
218-
}).collect();
222+
let by_model: Vec<ModelStats> = model_map
223+
.into_iter()
224+
.map(|(model, (count, dur, tok, scores))| {
225+
let avg_s = if scores.is_empty() {
226+
None
227+
} else {
228+
Some(scores.iter().sum::<f32>() as f64 / scores.len() as f64)
229+
};
230+
ModelStats {
231+
model,
232+
count,
233+
avg_duration_ms: dur / count as f64,
234+
total_tokens: tok,
235+
avg_score: avg_s,
236+
}
237+
})
238+
.collect();
219239

220240
// By source
221241
let mut source_map: HashMap<String, i64> = HashMap::new();
222242
for e in &events {
223243
*source_map.entry(e.diff_source.clone()).or_default() += 1;
224244
}
225-
let by_source: Vec<SourceStats> = source_map.into_iter().map(|(source, count)| SourceStats { source, count }).collect();
245+
let by_source: Vec<SourceStats> = source_map
246+
.into_iter()
247+
.map(|(source, count)| SourceStats { source, count })
248+
.collect();
226249

227250
// By repo
228251
let mut repo_map: HashMap<String, (i64, Vec<f32>)> = HashMap::new();
229252
for e in &events {
230253
if let Some(ref repo) = e.github_repo {
231254
let entry = repo_map.entry(repo.clone()).or_default();
232255
entry.0 += 1;
233-
if let Some(s) = e.overall_score { entry.1.push(s); }
256+
if let Some(s) = e.overall_score {
257+
entry.1.push(s);
258+
}
234259
}
235260
}
236-
let by_repo: Vec<RepoStats> = repo_map.into_iter().map(|(repo, (count, scores))| {
237-
let avg_s = if scores.is_empty() { None } else { Some(scores.iter().sum::<f32>() as f64 / scores.len() as f64) };
238-
RepoStats { repo, count, avg_score: avg_s }
239-
}).collect();
261+
let by_repo: Vec<RepoStats> = repo_map
262+
.into_iter()
263+
.map(|(repo, (count, scores))| {
264+
let avg_s = if scores.is_empty() {
265+
None
266+
} else {
267+
Some(scores.iter().sum::<f32>() as f64 / scores.len() as f64)
268+
};
269+
RepoStats {
270+
repo,
271+
count,
272+
avg_score: avg_s,
273+
}
274+
})
275+
.collect();
240276

241277
// Severity totals
242278
let mut severity_totals: HashMap<String, i64> = HashMap::new();

0 commit comments

Comments (0)