
Commit 5990367

fix: address code review issues in LLM and RAG protocols
- Fix ModelNotAvailableError to properly pass provider/model to base exception
- Fix RateLimitError type annotation (Optional[str] instead of str = None)
- Fix ContextLengthExceededError to accept and pass provider/model parameters
- Add async aretrieve method to RetrievalStrategyProtocol for async I/O support
- Add missing newline at end of protocols.py file

Addresses review feedback from code reviewers.

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent dacb1dc commit 5990367

2 files changed: 30 additions & 6 deletions


src/praisonai-agents/praisonaiagents/llm/protocols.py

Lines changed: 6 additions & 6 deletions
```diff
@@ -233,23 +233,23 @@ def __init__(self, message: str, provider: Optional[str] = None, model: Optional
 class RateLimitError(LLMProviderError):
     """Raised when rate limit is exceeded."""
 
-    def __init__(self, message: str = None, retry_after: Optional[float] = None):
+    def __init__(self, message: Optional[str] = None, retry_after: Optional[float] = None, provider: Optional[str] = None, model: Optional[str] = None):
         self.retry_after = retry_after
-        super().__init__(message or "Rate limit exceeded")
+        super().__init__(message or "Rate limit exceeded", provider=provider, model=model)
 
 
 class ModelNotAvailableError(LLMProviderError):
     """Raised when requested model is not available."""
 
     def __init__(self, model: str, provider: Optional[str] = None):
-        self.model = model
-        super().__init__(f"Model '{model}' is not available{f' from provider {provider}' if provider else ''}")
+        message = f"Model '{model}' is not available{f' from provider {provider}' if provider else ''}"
+        super().__init__(message, provider=provider, model=model)
 
 
 class ContextLengthExceededError(LLMProviderError):
     """Raised when input exceeds model's context length."""
 
-    def __init__(self, tokens: int, max_tokens: int):
+    def __init__(self, tokens: int, max_tokens: int, provider: Optional[str] = None, model: Optional[str] = None):
         self.tokens = tokens
         self.max_tokens = max_tokens
-        super().__init__(f"Input length ({tokens} tokens) exceeds model limit ({max_tokens} tokens)")
+        super().__init__(f"Input length ({tokens} tokens) exceeds model limit ({max_tokens} tokens)", provider=provider, model=model)
```

src/praisonai-agents/praisonaiagents/rag/protocols.py

Lines changed: 24 additions & 0 deletions
```diff
@@ -153,6 +153,30 @@ def retrieve(
             List of retrieved documents with metadata
         """
         ...
+
+    async def aretrieve(
+        self,
+        query: str,
+        knowledge_store: Any,  # KnowledgeStoreProtocol
+        *,
+        limit: int = 10,
+        filters: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ) -> List[Dict[str, Any]]:
+        """
+        Async version of retrieve.
+
+        Args:
+            query: Search query string
+            knowledge_store: Knowledge store to search
+            limit: Maximum number of results
+            filters: Optional metadata filters
+            **kwargs: Strategy-specific options
+
+        Returns:
+            List of retrieved documents with metadata
+        """
+        ...
 
 
 @runtime_checkable
```
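
Not part of this commit, but as a rough sketch of how a concrete strategy could satisfy the extended protocol: the `SimpleRetrievalStrategy` name and the knowledge store's `search` method below are assumptions (KnowledgeStoreProtocol's interface is not shown in this diff), and the async path simply offloads the sync implementation to a worker thread.

```python
import asyncio
from typing import Any, Dict, List, Optional


class SimpleRetrievalStrategy:
    """Hypothetical strategy matching RetrievalStrategyProtocol's shape."""

    def retrieve(
        self,
        query: str,
        knowledge_store: Any,
        *,
        limit: int = 10,
        filters: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Dict[str, Any]]:
        # Assumed store API: the real KnowledgeStoreProtocol is not in this diff.
        return knowledge_store.search(query, limit=limit, filters=filters)

    async def aretrieve(
        self,
        query: str,
        knowledge_store: Any,
        *,
        limit: int = 10,
        filters: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Dict[str, Any]]:
        # One simple way to honor the async contract: run the sync path in a
        # thread so the event loop stays responsive. A store with native
        # async I/O would be awaited directly instead.
        return await asyncio.to_thread(
            self.retrieve,
            query,
            knowledge_store,
            limit=limit,
            filters=filters,
            **kwargs,
        )
```

Delegating to `asyncio.to_thread` is just one defensible default; strategies backed by async-native stores would implement `aretrieve` independently.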
