Skip to content

Commit 9a3e566

Browse files
devatsecureclaude committed
fix: Replace 22 deprecated datetime.utcnow() calls with timezone-aware alternative
Replaced across 11 files: now uses datetime.now(tz=timezone.utc) per Python 3.12+ deprecation guidance. Removed redundant 'Z' suffixes where .isoformat() now includes +00:00. Used .replace(tzinfo=None) for naive datetime arithmetic in threat_intel_enricher.py KEV date comparison. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 7808466 commit 9a3e566

11 files changed

Lines changed: 434 additions & 644 deletions

scripts/feedback_collector.py

Lines changed: 26 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
import json
1515
import logging
16-
from datetime import datetime
16+
from datetime import datetime, timezone
1717
from pathlib import Path
1818
from typing import Any, Literal, Optional
1919

@@ -43,7 +43,7 @@ def record_feedback(
4343
feedback: Literal["tp", "fp"],
4444
reason: str,
4545
finding_details: Optional[dict[str, Any]] = None,
46-
user: str = "user"
46+
user: str = "user",
4747
) -> bool:
4848
"""
4949
Store feedback for future model improvement
@@ -69,7 +69,7 @@ def record_feedback(
6969
"feedback_label": "true_positive" if feedback == "tp" else "false_positive",
7070
"reason": reason,
7171
"user": user,
72-
"timestamp": datetime.utcnow().isoformat(),
72+
"timestamp": datetime.now(tz=timezone.utc).isoformat(),
7373
}
7474

7575
# Include finding details if provided
@@ -86,21 +86,15 @@ def record_feedback(
8686
with open(self.feedback_file, "a") as f:
8787
f.write(json.dumps(feedback_entry) + "\n")
8888

89-
logger.info(
90-
f"Recorded feedback: finding={finding_id}, "
91-
f"feedback={feedback}, reason='{reason[:50]}...'"
92-
)
89+
logger.info(f"Recorded feedback: finding={finding_id}, feedback={feedback}, reason='{reason[:50]}...'")
9390

9491
return True
9592

9693
except Exception as e:
9794
logger.error(f"Failed to record feedback: {e}")
9895
return False
9996

100-
def get_all_feedback(
101-
self,
102-
feedback_type: Optional[Literal["tp", "fp"]] = None
103-
) -> list[dict[str, Any]]:
97+
def get_all_feedback(self, feedback_type: Optional[Literal["tp", "fp"]] = None) -> list[dict[str, Any]]:
10498
"""
10599
Retrieve all feedback entries
106100
@@ -157,12 +151,7 @@ def get_feedback_by_id(self, finding_id: str) -> Optional[dict[str, Any]]:
157151

158152
return None
159153

160-
def get_similar_findings(
161-
self,
162-
finding_type: str,
163-
scanner: str,
164-
limit: int = 5
165-
) -> list[dict[str, Any]]:
154+
def get_similar_findings(self, finding_type: str, scanner: str, limit: int = 5) -> list[dict[str, Any]]:
166155
"""
167156
Retrieve past feedback for similar findings (for few-shot prompting)
168157
@@ -185,24 +174,15 @@ def get_similar_findings(
185174
similar = []
186175
for entry in all_feedback:
187176
finding = entry.get("finding", {})
188-
if (finding.get("scanner") == scanner and
189-
finding.get("finding_type") == finding_type):
177+
if finding.get("scanner") == scanner and finding.get("finding_type") == finding_type:
190178
similar.append(entry)
191179

192180
# Sort by timestamp (most recent first)
193-
similar.sort(
194-
key=lambda x: x.get("timestamp", ""),
195-
reverse=True
196-
)
181+
similar.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
197182

198183
return similar[:limit]
199184

200-
def generate_few_shot_examples(
201-
self,
202-
finding_type: str,
203-
scanner: str,
204-
max_examples: int = 3
205-
) -> str:
185+
def generate_few_shot_examples(self, finding_type: str, scanner: str, max_examples: int = 3) -> str:
206186
"""
207187
Generate few-shot prompt examples from historical feedback
208188
@@ -229,9 +209,9 @@ def generate_few_shot_examples(
229209

230210
example = f"""
231211
Example {i}:
232-
Finding Type: {finding.get('finding_type', 'unknown')}
233-
Scanner: {finding.get('scanner', 'unknown')}
234-
Description: {finding.get('description', '')[:150]}
212+
Finding Type: {finding.get("finding_type", "unknown")}
213+
Scanner: {finding.get("scanner", "unknown")}
214+
Description: {finding.get("description", "")[:150]}
235215
User Feedback: {feedback_label.upper()}
236216
Reason: {reason}
237217
"""
@@ -344,19 +324,16 @@ def export_for_training(self, output_file: str) -> bool:
344324
"messages": [
345325
{
346326
"role": "system",
347-
"content": "You are a security analysis expert. Evaluate if findings are true positives or false positives."
327+
"content": "You are a security analysis expert. Evaluate if findings are true positives or false positives.",
348328
},
349329
{
350330
"role": "user",
351331
"content": f"Finding Type: {finding.get('finding_type')}\n"
352-
f"Scanner: {finding.get('scanner')}\n"
353-
f"Description: {finding.get('description')}\n"
354-
f"Is this a true positive or false positive?"
332+
f"Scanner: {finding.get('scanner')}\n"
333+
f"Description: {finding.get('description')}\n"
334+
f"Is this a true positive or false positive?",
355335
},
356-
{
357-
"role": "assistant",
358-
"content": f"{feedback_label.upper()}: {reason}"
359-
}
336+
{"role": "assistant", "content": f"{feedback_label.upper()}: {reason}"},
360337
]
361338
}
362339

@@ -392,25 +369,16 @@ def main():
392369
"""CLI interface for feedback management"""
393370
import argparse
394371

395-
parser = argparse.ArgumentParser(
396-
description="Manage user feedback on security findings"
397-
)
398-
parser.add_argument(
399-
"--feedback-dir",
400-
default=".argus/feedback",
401-
help="Feedback directory path"
402-
)
372+
parser = argparse.ArgumentParser(description="Manage user feedback on security findings")
373+
parser.add_argument("--feedback-dir", default=".argus/feedback", help="Feedback directory path")
403374

404375
subparsers = parser.add_subparsers(dest="command", help="Command to execute")
405376

406377
# Record feedback
407378
record_parser = subparsers.add_parser("record", help="Record feedback for a finding")
408379
record_parser.add_argument("finding_id", help="Finding ID")
409380
record_parser.add_argument(
410-
"--mark",
411-
choices=["tp", "fp"],
412-
required=True,
413-
help="Mark as true positive (tp) or false positive (fp)"
381+
"--mark", choices=["tp", "fp"], required=True, help="Mark as true positive (tp) or false positive (fp)"
414382
)
415383
record_parser.add_argument("--reason", required=True, help="Reason for feedback")
416384

@@ -423,11 +391,7 @@ def main():
423391

424392
# List feedback
425393
list_parser = subparsers.add_parser("list", help="List all feedback")
426-
list_parser.add_argument(
427-
"--type",
428-
choices=["tp", "fp"],
429-
help="Filter by feedback type"
430-
)
394+
list_parser.add_argument("--type", choices=["tp", "fp"], help="Filter by feedback type")
431395

432396
args = parser.parse_args()
433397

@@ -438,11 +402,7 @@ def main():
438402
collector = FeedbackCollector(args.feedback_dir)
439403

440404
if args.command == "record":
441-
success = collector.record_feedback(
442-
finding_id=args.finding_id,
443-
feedback=args.mark,
444-
reason=args.reason
445-
)
405+
success = collector.record_feedback(finding_id=args.finding_id, feedback=args.mark, reason=args.reason)
446406
if success:
447407
print(f"✅ Recorded feedback: {args.mark.upper()} for finding {args.finding_id}")
448408
else:
@@ -463,8 +423,9 @@ def main():
463423
print("\nBy Scanner:")
464424
for scanner, scanner_stats in stats["by_scanner"].items():
465425
fp_rate = (scanner_stats["fp"] / scanner_stats["total"] * 100) if scanner_stats["total"] > 0 else 0
466-
print(f" {scanner:20s}: {scanner_stats['total']:3d} total, "
467-
f"{scanner_stats['fp']:3d} FP ({fp_rate:.0f}%)")
426+
print(
427+
f" {scanner:20s}: {scanner_stats['total']:3d} total, {scanner_stats['fp']:3d} FP ({fp_rate:.0f}%)"
428+
)
468429

469430
if stats.get("recent_feedback"):
470431
print("\nRecent Feedback:")
@@ -484,7 +445,7 @@ def main():
484445
elif args.command == "list":
485446
feedback_list = collector.get_all_feedback(feedback_type=args.type)
486447

487-
print(f"\n{'='*80}")
448+
print(f"\n{'=' * 80}")
488449
print(f"FEEDBACK LIST ({len(feedback_list)} entries)")
489450
print("=" * 80)
490451

scripts/orchestrator/llm_manager.py

Lines changed: 36 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -146,24 +146,28 @@ def _apply_retry_strategy(self):
146146
provider=self.config.get("ai_provider", ""),
147147
)(self.call_llm_api)
148148
logger.debug(
149-
"Smart retry enabled (max_attempts=%d)", max_attempts,
149+
"Smart retry enabled (max_attempts=%d)",
150+
max_attempts,
150151
)
151152
else:
152153
# Legacy tenacity retry for backward compatibility
153154
self.call_llm_api = retry(
154155
stop=stop_after_attempt(max_attempts),
155156
wait=wait_exponential(multiplier=1, min=4, max=10),
156-
retry=retry_if_exception_type((
157-
ConnectionError,
158-
TimeoutError,
159-
OSError,
160-
Exception,
161-
)),
157+
retry=retry_if_exception_type(
158+
(
159+
ConnectionError,
160+
TimeoutError,
161+
OSError,
162+
Exception,
163+
)
164+
),
162165
before_sleep=before_sleep_log(logger, logging.WARNING),
163166
reraise=True,
164167
)(self.call_llm_api)
165168
logger.debug(
166-
"Legacy tenacity retry enabled (max_attempts=%d)", max_attempts,
169+
"Legacy tenacity retry enabled (max_attempts=%d)",
170+
max_attempts,
167171
)
168172

169173
def detect_provider(self) -> str:
@@ -190,7 +194,9 @@ def detect_provider(self) -> str:
190194
return "claude-cli"
191195
else:
192196
logger.warning("No AI provider configured")
193-
logger.info("Set one of: ANTHROPIC_API_KEY, OPENAI_API_KEY, OLLAMA_ENDPOINT, or use --ai-provider claude-cli")
197+
logger.info(
198+
"Set one of: ANTHROPIC_API_KEY, OPENAI_API_KEY, OLLAMA_ENDPOINT, or use --ai-provider claude-cli"
199+
)
194200
return None
195201

196202
def initialize(self, provider: str = None) -> bool:
@@ -271,7 +277,9 @@ def _get_client(self, provider: str):
271277
endpoint = self.config.get("ollama_endpoint", "http://localhost:11434")
272278
# Sanitize endpoint URL for logging
273279
safe_endpoint = (
274-
str(endpoint).split("@")[-1] if "@" in str(endpoint) else str(endpoint).split("//")[-1].split("/")[0]
280+
str(endpoint).split("@")[-1]
281+
if "@" in str(endpoint)
282+
else str(endpoint).split("//")[-1].split("/")[0]
275283
)
276284
logger.info(f"Using Ollama endpoint: {safe_endpoint}")
277285
return OpenAI(base_url=f"{endpoint}/v1", api_key="ollama"), "ollama"
@@ -347,9 +355,7 @@ def _get_working_model_with_fallback(self, client, initial_model: str) -> str:
347355
# Quick test with minimal tokens
348356
safe_model_name = str(model_id).split("/")[-1] if model_id else "unknown"
349357
logger.debug(f"Testing model: {safe_model_name}")
350-
client.messages.create(
351-
model=model_id, max_tokens=10, messages=[{"role": "user", "content": "test"}]
352-
)
358+
client.messages.create(model=model_id, max_tokens=10, messages=[{"role": "user", "content": "test"}])
353359
logger.info(f"Found working model: {safe_model_name}")
354360
return model_id
355361
except Exception as e:
@@ -415,12 +421,7 @@ def calculate_actual_cost(input_tokens: int, output_tokens: int, provider: str)
415421

416422
return input_cost + output_cost
417423

418-
def generate_few_shot_examples(
419-
self,
420-
finding_type: str,
421-
scanner: str,
422-
max_examples: int = 3
423-
) -> str:
424+
def generate_few_shot_examples(self, finding_type: str, scanner: str, max_examples: int = 3) -> str:
424425
"""
425426
Generate few-shot examples from historical feedback
426427
@@ -437,9 +438,7 @@ def generate_few_shot_examples(
437438

438439
try:
439440
return self.feedback_collector.generate_few_shot_examples(
440-
finding_type=finding_type,
441-
scanner=scanner,
442-
max_examples=max_examples
441+
finding_type=finding_type, scanner=scanner, max_examples=max_examples
443442
)
444443
except Exception as e:
445444
logger.debug(f"Could not generate few-shot examples: {e}")
@@ -453,7 +452,7 @@ def log_ai_decision(
453452
decision: str,
454453
reasoning: str,
455454
confidence: float,
456-
noise_score: float = 0.0
455+
noise_score: float = 0.0,
457456
) -> bool:
458457
"""
459458
Log AI triage decision for analysis
@@ -474,7 +473,7 @@ def log_ai_decision(
474473
return False
475474

476475
try:
477-
from datetime import datetime
476+
from datetime import datetime, timezone
478477

479478
decision_entry = {
480479
"finding_id": finding_id,
@@ -485,7 +484,7 @@ def log_ai_decision(
485484
"confidence": confidence,
486485
"noise_score": noise_score,
487486
"model": self.model,
488-
"timestamp": datetime.utcnow().isoformat(),
487+
"timestamp": datetime.now(tz=timezone.utc).isoformat(),
489488
}
490489

491490
return self.cache_manager.log_decision(decision_entry)
@@ -500,7 +499,7 @@ def call_llm_api(
500499
max_tokens: int,
501500
circuit_breaker: "CostCircuitBreaker" = None,
502501
operation: str = "LLM call",
503-
few_shot_prefix: str = ""
502+
few_shot_prefix: str = "",
504503
) -> tuple:
505504
"""Call LLM API with retry logic, cost enforcement, and few-shot learning
506505
@@ -563,11 +562,14 @@ def call_llm_api(
563562
[
564563
self.client, # path to claude binary
565564
"--print",
566-
"--model", self.model,
567-
"--output-format", "json",
565+
"--model",
566+
self.model,
567+
"--output-format",
568+
"json",
568569
"--no-session-persistence",
569570
"--dangerously-skip-permissions",
570-
"--max-turns", "1",
571+
"--max-turns",
572+
"1",
571573
],
572574
input=full_prompt,
573575
capture_output=True,
@@ -635,6 +637,7 @@ def generate(self, user_prompt: str, system_prompt: str = "", max_tokens: int =
635637

636638
class _Usage:
637639
"""Minimal usage object matching Anthropic/OpenAI response.usage."""
640+
638641
__slots__ = ("input_tokens", "output_tokens")
639642

640643
def __init__(self, input_tokens: int, output_tokens: int):
@@ -644,6 +647,7 @@ def __init__(self, input_tokens: int, output_tokens: int):
644647

645648
class LLMResponse:
646649
"""Lightweight response wrapper compatible with IRISAnalyzer._parse_llm_response."""
650+
647651
__slots__ = ("content", "usage")
648652

649653
def __init__(self, text: str, input_tokens: int = 0, output_tokens: int = 0):
@@ -741,7 +745,9 @@ def calculate_actual_cost(input_tokens: int, output_tokens: int, provider: str)
741745
return LLMManager.calculate_actual_cost(input_tokens, output_tokens, provider)
742746

743747

744-
def call_llm_api(client, provider: str, model: str, prompt: str, max_tokens: int, circuit_breaker=None, operation: str = "LLM call") -> tuple:
748+
def call_llm_api(
749+
client, provider: str, model: str, prompt: str, max_tokens: int, circuit_breaker=None, operation: str = "LLM call"
750+
) -> tuple:
745751
"""Call LLM API with retry logic and cost enforcement
746752
747753
Args:

0 commit comments

Comments (0)