Skip to content

Commit 4adfc06

Browse files
committed
Address PR review feedback on model routing and validation logic
1 parent 5c3dc28 commit 4adfc06

2 files changed

Lines changed: 15 additions & 9 deletions

File tree

claudecode/github_action_audit.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -314,7 +314,7 @@ def run_prompt(self, repo_dir: Path, prompt: str, model: Optional[str] = None) -
314314
error_details += f"Stderr: {result.stderr}\n"
315315
error_details += f"Stdout: {result.stdout[:500]}..."
316316
return False, error_details, {}
317-
time.sleep(5 * attempt)
317+
time.sleep(5 * (attempt + 1))
318318
continue
319319

320320
success, parsed_result = parse_json_with_fallbacks(result.stdout, "Claude Code output")
@@ -348,9 +348,9 @@ def run_prompt(self, repo_dir: Path, prompt: str, model: Optional[str] = None) -
348348
except Exception as e:
349349
return False, f"Claude Code execution error: {str(e)}", {}
350350

351-
def run_code_review(self, repo_dir: Path, prompt: str) -> Tuple[bool, str, Dict[str, Any]]:
351+
def run_code_review(self, repo_dir: Path, prompt: str, model: Optional[str] = None) -> Tuple[bool, str, Dict[str, Any]]:
352352
"""Run code review prompt and normalize to findings payload."""
353-
success, error_msg, parsed = self.run_prompt(repo_dir, prompt, model=DEFAULT_CLAUDE_MODEL)
353+
success, error_msg, parsed = self.run_prompt(repo_dir, prompt, model=model or DEFAULT_CLAUDE_MODEL)
354354
if not success:
355355
return False, error_msg, {}
356356
if isinstance(parsed, dict) and 'findings' in parsed:

claudecode/review_orchestrator.py

Lines changed: 12 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,11 @@ def _run_phase(self, repo_dir: Path, prompt: str, model: str, phase_name: str) -
6767
if hasattr(self.claude_runner, "run_prompt"):
6868
raw_result = self.claude_runner.run_prompt(repo_dir, prompt, model=model)
6969
if not (isinstance(raw_result, tuple) and len(raw_result) == 3) and hasattr(self.claude_runner, "run_code_review"):
70-
raw_result = self.claude_runner.run_code_review(repo_dir, prompt)
70+
try:
71+
raw_result = self.claude_runner.run_code_review(repo_dir, prompt, model=model)
72+
except TypeError:
73+
# Backward compatibility for legacy mocks/runners that don't accept model parameter.
74+
raw_result = self.claude_runner.run_code_review(repo_dir, prompt)
7175
if not (isinstance(raw_result, tuple) and len(raw_result) == 3):
7276
return False, {}, f"Invalid runner response for {phase_name}"
7377

@@ -217,7 +221,10 @@ def run(
217221
return False, {}, f"Validation phase failed: {err_v}"
218222

219223
validated: List[Dict[str, Any]] = []
220-
decisions = validation_result.get("validated_findings", [])
224+
has_validation_output = isinstance(validation_result, dict) and "validated_findings" in validation_result
225+
decisions = validation_result.get("validated_findings", []) if isinstance(validation_result, dict) else []
226+
if not isinstance(decisions, list):
227+
decisions = []
221228
for decision in decisions:
222229
if not isinstance(decision, dict):
223230
continue
@@ -235,10 +242,9 @@ def run(
235242
finding["confidence"] = float(confidence)
236243
validated.append(finding)
237244

238-
# Fallback: keep merged findings if validation returned no decisions
239-
if decisions and not validated:
240-
validated = []
241-
elif not decisions:
245+
# If validator did not return decisions at all, preserve candidates.
246+
# If it explicitly returned validated_findings (including empty list), trust validator output.
247+
if not has_validation_output:
242248
validated = all_candidates
243249

244250
# Apply existing filtering pipeline

0 commit comments

Comments (0)