Skip to content

Commit d9d232f

Browse files
haasonsaas and claude committed
chore: fix all clippy -D warnings + add pre-commit hooks
- Fix items_after_test_module in misc.rs, context.rs, interactive.rs by moving code before test modules - Fix cloned_ref_to_slice_refs: use std::slice::from_ref - Fix redundant_closure in compression.rs - Fix len_zero comparisons in context.rs and verification.rs - Fix manual_range_contains in smart_response.rs - Add .githooks/pre-commit running fmt, clippy -D warnings, and tests 983 tests pass, zero clippy warnings. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent f421fb7 commit d9d232f

File tree

7 files changed

+145
-120
lines changed

7 files changed

+145
-120
lines changed

.githooks/pre-commit

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
#!/usr/bin/env bash
2+
set -e
3+
4+
echo "==> Running cargo fmt --check..."
5+
cargo fmt --check
6+
if [ $? -ne 0 ]; then
7+
echo "ERROR: cargo fmt found formatting issues. Run 'cargo fmt' to fix."
8+
exit 1
9+
fi
10+
11+
echo "==> Running cargo clippy..."
12+
cargo clippy --all-targets -- -D warnings
13+
if [ $? -ne 0 ]; then
14+
echo "ERROR: cargo clippy found warnings. Fix them before committing."
15+
exit 1
16+
fi
17+
18+
echo "==> Running cargo test..."
19+
cargo test
20+
if [ $? -ne 0 ]; then
21+
echo "ERROR: Tests failed. Fix them before committing."
22+
exit 1
23+
fi
24+
25+
echo "==> All pre-commit checks passed."

src/commands/misc.rs

Lines changed: 70 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -404,76 +404,6 @@ fn resolve_convention_store_path_for_feedback(config: &config::Config) -> Option
404404
dirs::data_local_dir().map(|d| d.join("diffscope").join("conventions.json"))
405405
}
406406

407-
#[cfg(test)]
408-
mod tests {
409-
use super::*;
410-
411-
#[test]
412-
fn test_select_discussion_comment_empty_comments() {
413-
// Should return an error, not panic
414-
let result = select_discussion_comment(&[], None, None);
415-
assert!(result.is_err());
416-
}
417-
418-
#[test]
419-
fn test_select_discussion_comment_defaults_to_first() {
420-
let comment = core::Comment {
421-
id: "cmt_1".to_string(),
422-
file_path: PathBuf::from("test.rs"),
423-
line_number: 1,
424-
content: "test".to_string(),
425-
rule_id: None,
426-
severity: core::comment::Severity::Info,
427-
category: core::comment::Category::BestPractice,
428-
suggestion: None,
429-
confidence: 0.8,
430-
code_suggestion: None,
431-
tags: vec![],
432-
fix_effort: core::comment::FixEffort::Low,
433-
feedback: None,
434-
};
435-
let result = select_discussion_comment(&[comment.clone()], None, None).unwrap();
436-
assert_eq!(result.id, "cmt_1");
437-
}
438-
439-
#[test]
440-
fn test_feedback_stats_not_double_counted() {
441-
// Simulate accepting the same comment twice — stats should only increment once
442-
let mut store = review::FeedbackStore::default();
443-
let comment = core::Comment {
444-
id: "cmt_dup".to_string(),
445-
file_path: PathBuf::from("test.rs"),
446-
line_number: 1,
447-
content: "test".to_string(),
448-
rule_id: None,
449-
severity: core::comment::Severity::Warning,
450-
category: core::comment::Category::Bug,
451-
suggestion: None,
452-
confidence: 0.8,
453-
code_suggestion: None,
454-
tags: vec![],
455-
fix_effort: core::comment::FixEffort::Low,
456-
feedback: None,
457-
};
458-
459-
let comments = vec![comment];
460-
461-
// Accept the same batch of comments twice
462-
for _ in 0..2 {
463-
apply_feedback_accept(&mut store, &comments);
464-
}
465-
466-
let key = review::classify_comment_type(&comments[0])
467-
.as_str()
468-
.to_string();
469-
let stats = &store.by_comment_type[&key];
470-
assert_eq!(
471-
stats.accepted, 1,
472-
"Stats should only count 1 acceptance, not 2 (double-counting bug)"
473-
);
474-
}
475-
}
476-
477407
fn load_discussion_thread(path: Option<&std::path::Path>, comment_id: &str) -> DiscussionThread {
478408
let Some(path) = path else {
479409
return DiscussionThread {
@@ -567,3 +497,73 @@ async fn answer_discussion_question(
567497
let response = adapter.complete(request).await?;
568498
Ok(response.content)
569499
}
500+
501+
#[cfg(test)]
502+
mod tests {
503+
use super::*;
504+
505+
#[test]
506+
fn test_select_discussion_comment_empty_comments() {
507+
// Should return an error, not panic
508+
let result = select_discussion_comment(&[], None, None);
509+
assert!(result.is_err());
510+
}
511+
512+
#[test]
513+
fn test_select_discussion_comment_defaults_to_first() {
514+
let comment = core::Comment {
515+
id: "cmt_1".to_string(),
516+
file_path: PathBuf::from("test.rs"),
517+
line_number: 1,
518+
content: "test".to_string(),
519+
rule_id: None,
520+
severity: core::comment::Severity::Info,
521+
category: core::comment::Category::BestPractice,
522+
suggestion: None,
523+
confidence: 0.8,
524+
code_suggestion: None,
525+
tags: vec![],
526+
fix_effort: core::comment::FixEffort::Low,
527+
feedback: None,
528+
};
529+
let result = select_discussion_comment(std::slice::from_ref(&comment), None, None).unwrap();
530+
assert_eq!(result.id, "cmt_1");
531+
}
532+
533+
#[test]
534+
fn test_feedback_stats_not_double_counted() {
535+
// Simulate accepting the same comment twice — stats should only increment once
536+
let mut store = review::FeedbackStore::default();
537+
let comment = core::Comment {
538+
id: "cmt_dup".to_string(),
539+
file_path: PathBuf::from("test.rs"),
540+
line_number: 1,
541+
content: "test".to_string(),
542+
rule_id: None,
543+
severity: core::comment::Severity::Warning,
544+
category: core::comment::Category::Bug,
545+
suggestion: None,
546+
confidence: 0.8,
547+
code_suggestion: None,
548+
tags: vec![],
549+
fix_effort: core::comment::FixEffort::Low,
550+
feedback: None,
551+
};
552+
553+
let comments = vec![comment];
554+
555+
// Accept the same batch of comments twice
556+
for _ in 0..2 {
557+
apply_feedback_accept(&mut store, &comments);
558+
}
559+
560+
let key = review::classify_comment_type(&comments[0])
561+
.as_str()
562+
.to_string();
563+
let stats = &store.by_comment_type[&key];
564+
assert_eq!(
565+
stats.accepted, 1,
566+
"Stats should only count 1 acceptance, not 2 (double-counting bug)"
567+
);
568+
}
569+
}

src/core/context.rs

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,16 @@ fn truncate_with_notice(mut content: String, max_chars: usize) -> String {
288288
content
289289
}
290290

291+
async fn read_file_lossy(path: &Path) -> Result<String> {
292+
match tokio::fs::read_to_string(path).await {
293+
Ok(content) => Ok(content),
294+
Err(_) => {
295+
let bytes = tokio::fs::read(path).await?;
296+
Ok(String::from_utf8_lossy(&bytes).to_string())
297+
}
298+
}
299+
}
300+
291301
#[cfg(test)]
292302
mod tests {
293303
use super::*;
@@ -304,7 +314,7 @@ mod tests {
304314
assert!(result.contains("[Truncated]"));
305315
// Verify the result is valid UTF-8 (it is since it's a String, but
306316
// the point is truncate() would have panicked)
307-
assert!(result.len() > 0);
317+
assert!(!result.is_empty());
308318
}
309319

310320
#[test]
@@ -364,13 +374,3 @@ mod tests {
364374
assert!(chunk.content.contains("pub fn process"));
365375
}
366376
}
367-
368-
async fn read_file_lossy(path: &Path) -> Result<String> {
369-
match tokio::fs::read_to_string(path).await {
370-
Ok(content) => Ok(content),
371-
Err(_) => {
372-
let bytes = tokio::fs::read(path).await?;
373-
Ok(String::from_utf8_lossy(&bytes).to_string())
374-
}
375-
}
376-
}

src/core/interactive.rs

Lines changed: 36 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -246,6 +246,42 @@ Interactive commands respect these configurations."#
246246
}
247247
}
248248

249+
/// Manages per-session ignore patterns from @diffscope ignore commands.
250+
/// Will be wired into the review pipeline's triage filter.
251+
#[allow(dead_code)]
252+
pub struct InteractiveProcessor {
253+
ignored_patterns: HashSet<String>,
254+
}
255+
256+
#[allow(dead_code)]
257+
impl InteractiveProcessor {
258+
pub fn new() -> Self {
259+
Self {
260+
ignored_patterns: HashSet::new(),
261+
}
262+
}
263+
264+
pub fn add_ignore_pattern(&mut self, pattern: &str) {
265+
self.ignored_patterns.insert(pattern.to_string());
266+
}
267+
268+
pub fn should_ignore(&self, path: &str) -> bool {
269+
self.ignored_patterns.iter().any(|pattern| {
270+
// Simple glob matching
271+
if pattern.contains('*') {
272+
// Escape regex metacharacters, then convert glob * to .*
273+
let escaped = regex::escape(pattern).replace(r"\*", ".*");
274+
let regex_pattern = format!("^{}$", escaped);
275+
regex::Regex::new(&regex_pattern)
276+
.map(|re| re.is_match(path))
277+
.unwrap_or(false)
278+
} else {
279+
path.contains(pattern)
280+
}
281+
})
282+
}
283+
}
284+
249285
#[cfg(test)]
250286
mod tests {
251287
use super::*;
@@ -461,39 +497,3 @@ mod tests {
461497
);
462498
}
463499
}
464-
465-
/// Manages per-session ignore patterns from @diffscope ignore commands.
466-
/// Will be wired into the review pipeline's triage filter.
467-
#[allow(dead_code)]
468-
pub struct InteractiveProcessor {
469-
ignored_patterns: HashSet<String>,
470-
}
471-
472-
#[allow(dead_code)]
473-
impl InteractiveProcessor {
474-
pub fn new() -> Self {
475-
Self {
476-
ignored_patterns: HashSet::new(),
477-
}
478-
}
479-
480-
pub fn add_ignore_pattern(&mut self, pattern: &str) {
481-
self.ignored_patterns.insert(pattern.to_string());
482-
}
483-
484-
pub fn should_ignore(&self, path: &str) -> bool {
485-
self.ignored_patterns.iter().any(|pattern| {
486-
// Simple glob matching
487-
if pattern.contains('*') {
488-
// Escape regex metacharacters, then convert glob * to .*
489-
let escaped = regex::escape(pattern).replace(r"\*", ".*");
490-
let regex_pattern = format!("^{}$", escaped);
491-
regex::Regex::new(&regex_pattern)
492-
.map(|re| re.is_match(path))
493-
.unwrap_or(false)
494-
} else {
495-
path.contains(pattern)
496-
}
497-
})
498-
}
499-
}

src/parsing/smart_response.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ pub fn parse_smart_confidence(value: &str) -> Option<f32> {
173173
let has_percent = raw.ends_with('%');
174174
let trimmed = raw.trim_end_matches('%');
175175
if let Ok(num) = trimmed.parse::<f32>() {
176-
if !has_percent && num >= 0.0 && num <= 1.0 {
176+
if !has_percent && (0.0..=1.0).contains(&num) {
177177
// Bare decimal in 0..1 range — treat as already-normalized confidence
178178
Some(num.clamp(0.0, 1.0))
179179
} else {

src/review/compression.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -846,7 +846,7 @@ mod tests {
846846
)])],
847847
),
848848
];
849-
let total = diffs.iter().map(|d| estimate_diff_tokens(d)).sum::<usize>();
849+
let total = diffs.iter().map(estimate_diff_tokens).sum::<usize>();
850850
// Budget smaller than total forces Stage 2, where deletion-only hunks get removed
851851
let result = compress_diffs(&diffs, total / 2, 5);
852852
// All files should be skipped since they're deletion-only after compression

src/review/verification.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -462,7 +462,7 @@ mod tests {
462462
// Both should be captured (first one wins in filter since find() returns first)
463463
let c1_results: Vec<_> = results.iter().filter(|r| r.comment_id == "c1").collect();
464464
assert!(
465-
c1_results.len() >= 1,
465+
!c1_results.is_empty(),
466466
"Should have at least one result for c1"
467467
);
468468
}

0 commit comments

Comments
 (0)