Skip to content

Commit 6646dd1

Browse files
refactor: SDK Architecture - Centralize Logging and Retry Logic (#1212)
Merges centralized logging (_logging.py, 152 files migrated) and resilience utility (utils/resilience.py). Also fixes CI/CD workflows and test fixtures. Closes #1131 #1132
2 parents 64ae40e + ba3a3c3 commit 6646dd1

168 files changed

Lines changed: 1260 additions & 1109 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.github/workflows/test-core.yml

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -64,9 +64,10 @@ jobs:
6464
|| echo "Some integration tests failed"
6565
6666
- name: Upload coverage to Codecov
67+
if: always()
6768
uses: codecov/codecov-action@v5
6869
with:
69-
token: ${{ secrets.CODECOV_TOKEN }}
70-
file: src/praisonai/coverage.xml
70+
token: ${{ secrets.CODECOV_TOKEN || '' }}
71+
files: src/praisonai/coverage.xml
7172
flags: core-tests
7273
fail_ci_if_error: false

.github/workflows/test-optimized.yml

Lines changed: 14 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -157,8 +157,8 @@ jobs:
157157
if: matrix.python-version == '3.11'
158158
uses: codecov/codecov-action@v5
159159
with:
160-
token: ${{ secrets.CODECOV_TOKEN }}
161-
file: src/praisonai/coverage.xml
160+
token: ${{ secrets.CODECOV_TOKEN || '' }}
161+
files: src/praisonai/coverage.xml
162162
flags: main-tests
163163
fail_ci_if_error: false
164164

@@ -301,8 +301,10 @@ jobs:
301301
steps:
302302
- name: Check if Google key is available
303303
id: check-key
304+
env:
305+
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
304306
run: |
305-
if [ -n "${{ secrets.GOOGLE_API_KEY }}" ]; then
307+
if [ -n "$GOOGLE_API_KEY" ]; then
306308
echo "has_key=true" >> $GITHUB_OUTPUT
307309
else
308310
echo "has_key=false" >> $GITHUB_OUTPUT
@@ -356,8 +358,10 @@ jobs:
356358
steps:
357359
- name: Check if Groq key is available
358360
id: check-key
361+
env:
362+
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
359363
run: |
360-
if [ -n "${{ secrets.GROQ_API_KEY }}" ]; then
364+
if [ -n "$GROQ_API_KEY" ]; then
361365
echo "has_key=true" >> $GITHUB_OUTPUT
362366
else
363367
echo "has_key=false" >> $GITHUB_OUTPUT
@@ -411,8 +415,10 @@ jobs:
411415
steps:
412416
- name: Check if xAI key is available
413417
id: check-key
418+
env:
419+
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
414420
run: |
415-
if [ -n "${{ secrets.XAI_API_KEY }}" ]; then
421+
if [ -n "$XAI_API_KEY" ]; then
416422
echo "has_key=true" >> $GITHUB_OUTPUT
417423
else
418424
echo "has_key=false" >> $GITHUB_OUTPUT
@@ -466,8 +472,10 @@ jobs:
466472
steps:
467473
- name: Check if Cohere key is available
468474
id: check-key
475+
env:
476+
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
469477
run: |
470-
if [ -n "${{ secrets.COHERE_API_KEY }}" ]; then
478+
if [ -n "$COHERE_API_KEY" ]; then
471479
echo "has_key=true" >> $GITHUB_OUTPUT
472480
else
473481
echo "has_key=false" >> $GITHUB_OUTPUT

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.5.88" \
19+
"praisonai>=4.5.89" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=4.5.88" \
23+
"praisonai>=4.5.89" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=4.5.88" \
19+
"praisonai>=4.5.89" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 20 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,7 @@
22
import time
33
import json
44
import logging
5+
from praisonaiagents._logging import get_logger
56
import asyncio
67
import contextlib
78
import threading
@@ -16,7 +17,7 @@
1617
from .session_manager import SessionManagerMixin
1718

1819
# Module-level logger for thread safety errors and debugging
19-
logger = logging.getLogger(__name__)
20+
logger = get_logger(__name__)
2021

2122
# ============================================================================
2223
# Performance: Lazy imports for heavy dependencies
@@ -195,7 +196,6 @@ def __init__(self, agent_name: str, total_cost: float, max_budget: float):
195196
f"${total_cost:.4f} >= ${max_budget:.4f}"
196197
)
197198

198-
199199
class Agent(ToolExecutionMixin, ChatHandlerMixin, SessionManagerMixin):
200200
# Class-level counter for generating unique display names for nameless agents
201201
_agent_counter = 0
@@ -255,16 +255,16 @@ def _get_default_model(cls):
255255
def _configure_logging(cls):
256256
"""Configure logging settings once for all agent instances."""
257257
# Configure logging to suppress unwanted outputs
258-
logging.getLogger("litellm").setLevel(logging.WARNING)
258+
get_logger("litellm").setLevel(logging.WARNING)
259259

260260
# Allow httpx logging when LOGLEVEL=debug, otherwise suppress it
261261
loglevel = os.environ.get('LOGLEVEL', 'INFO').upper()
262262
if loglevel == 'DEBUG':
263-
logging.getLogger("httpx").setLevel(logging.INFO)
264-
logging.getLogger("httpcore").setLevel(logging.INFO)
263+
get_logger("httpx").setLevel(logging.INFO)
264+
get_logger("httpcore").setLevel(logging.INFO)
265265
else:
266-
logging.getLogger("httpx").setLevel(logging.WARNING)
267-
logging.getLogger("httpcore").setLevel(logging.WARNING)
266+
get_logger("httpx").setLevel(logging.WARNING)
267+
get_logger("httpcore").setLevel(logging.WARNING)
268268

269269
@classmethod
270270
def from_template(
@@ -2771,7 +2771,7 @@ def run_autonomous(
27712771
"agent_name": getattr(self, 'name', None),
27722772
})
27732773
elif self.autonomy_config.get("observe"):
2774-
logging.getLogger(__name__).info(
2774+
get_logger(__name__).info(
27752775
f"[autonomy] iteration={iterations} stage={stage} "
27762776
f"response_len={len(response_str)}"
27772777
)
@@ -3161,7 +3161,7 @@ async def main():
31613161
"agent_name": getattr(self, 'name', None),
31623162
})
31633163
elif self.autonomy_config.get("observe"):
3164-
logging.getLogger(__name__).info(
3164+
get_logger(__name__).info(
31653165
f"[autonomy-async] iteration={iterations} stage={stage} "
31663166
f"response_len={len(response_str)}"
31673167
)
@@ -6337,7 +6337,7 @@ def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, r
63376337
self._final_display_shown = False
63386338

63396339
# Log all parameter values when in debug mode
6340-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
6340+
if get_logger().getEffectiveLevel() == logging.DEBUG:
63416341
param_info = {
63426342
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
63436343
"temperature": temperature,
@@ -6503,7 +6503,7 @@ def _chat_impl(self, prompt, temperature, tools, output_json, output_pydantic, r
65036503
self._persist_message("assistant", response_text)
65046504

65056505
# Log completion time if in debug mode
6506-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
6506+
if get_logger().getEffectiveLevel() == logging.DEBUG:
65076507
total_time = time.time() - start_time
65086508
logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
65096509

@@ -6846,7 +6846,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
68466846
self._final_display_shown = False
68476847

68486848
# Log all parameter values when in debug mode
6849-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
6849+
if get_logger().getEffectiveLevel() == logging.DEBUG:
68506850
param_info = {
68516851
"prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
68526852
"temperature": temperature,
@@ -6950,7 +6950,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
69506950

69516951
self.chat_history.append({"role": "assistant", "content": response_text})
69526952

6953-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
6953+
if get_logger().getEffectiveLevel() == logging.DEBUG:
69546954
total_time = time.time() - start_time
69556955
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
69566956

@@ -6969,7 +6969,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
69696969
# Rollback chat history if LLM call fails
69706970
self.chat_history = self.chat_history[:chat_history_length]
69716971
_get_display_functions()['display_error'](f"Error in LLM chat: {e}")
6972-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
6972+
if get_logger().getEffectiveLevel() == logging.DEBUG:
69736973
total_time = time.time() - start_time
69746974
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
69756975
return None
@@ -7058,7 +7058,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
70587058
tools=formatted_tools,
70597059
)
70607060
result = await self._achat_completion(response, tools)
7061-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7061+
if get_logger().getEffectiveLevel() == logging.DEBUG:
70627062
total_time = time.time() - start_time
70637063
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
70647064
# Execute callback after tool completion
@@ -7072,7 +7072,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
70727072
response_format={"type": "json_object"}
70737073
)
70747074
response_text = response.choices[0].message.content
7075-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7075+
if get_logger().getEffectiveLevel() == logging.DEBUG:
70767076
total_time = time.time() - start_time
70777077
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
70787078
# Execute callback after JSON/Pydantic completion
@@ -7114,7 +7114,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
71147114
# Return the original response without reflection
71157115
self.chat_history.append({"role": "user", "content": original_prompt})
71167116
self.chat_history.append({"role": "assistant", "content": response_text})
7117-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7117+
if get_logger().getEffectiveLevel() == logging.DEBUG:
71187118
total_time = time.time() - start_time
71197119
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
71207120
return await self._atrigger_after_agent_hook(original_prompt, response_text, start_time)
@@ -7166,7 +7166,7 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
71667166
break
71677167
continue
71687168

7169-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7169+
if get_logger().getEffectiveLevel() == logging.DEBUG:
71707170
total_time = time.time() - start_time
71717171
logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
71727172

@@ -7183,13 +7183,13 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
71837183
return None
71847184
except Exception as e:
71857185
_get_display_functions()['display_error'](f"Error in chat completion: {e}")
7186-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7186+
if get_logger().getEffectiveLevel() == logging.DEBUG:
71877187
total_time = time.time() - start_time
71887188
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
71897189
return None
71907190
except Exception as e:
71917191
_get_display_functions()['display_error'](f"Error in achat: {e}")
7192-
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
7192+
if get_logger().getEffectiveLevel() == logging.DEBUG:
71937193
total_time = time.time() - start_time
71947194
logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
71957195
return None
@@ -7271,7 +7271,6 @@ async def _achat_completion(self, response, tools, reasoning_steps=False):
72717271
_get_display_functions()['display_error'](f"Error executing tool {function_name}: {e}")
72727272
results.append(None)
72737273

7274-
72757274
# If we have results, format them into a response
72767275
if results:
72777276
formatted_results = "\n".join([str(r) for r in results if r is not None])

src/praisonai-agents/praisonaiagents/agent/audio_agent.py

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -9,6 +9,7 @@
99
"""
1010
import os
1111
import logging
12+
from praisonaiagents._logging import get_logger
1213
import warnings
1314
from dataclasses import dataclass, field
1415
from typing import Optional, Any, Dict, Union, BinaryIO
@@ -17,7 +18,6 @@
1718
# Filter out Pydantic warning about fields
1819
warnings.filterwarnings("ignore", "Valid config keys have changed in V2", UserWarning)
1920

20-
2121
# ─────────────────────────────────────────────────────────────────────────────
2222
# AudioConfig - Configuration dataclass following feature_configs.py patterns
2323
# ─────────────────────────────────────────────────────────────────────────────
@@ -59,7 +59,6 @@ def to_dict(self) -> Dict[str, Any]:
5959
"api_key": self.api_key,
6060
}
6161

62-
6362
# ─────────────────────────────────────────────────────────────────────────────
6463
# AudioAgent Class - Agent-centric audio processing
6564
# ─────────────────────────────────────────────────────────────────────────────
@@ -192,10 +191,10 @@ def litellm(self):
192191
def _configure_logging(self, verbose: Union[bool, int]) -> None:
193192
"""Configure logging levels."""
194193
if isinstance(verbose, int) and verbose >= 10:
195-
logging.getLogger("litellm").setLevel(logging.DEBUG)
194+
get_logger("litellm").setLevel(logging.DEBUG)
196195
else:
197-
logging.getLogger("litellm").setLevel(logging.WARNING)
198-
logging.getLogger("httpx").setLevel(logging.WARNING)
196+
get_logger("litellm").setLevel(logging.WARNING)
197+
get_logger("httpx").setLevel(logging.WARNING)
199198

200199
def _get_model_params(self, model: Optional[str] = None) -> Dict[str, Any]:
201200
"""Build parameters for LiteLLM calls."""

src/praisonai-agents/praisonaiagents/agent/autonomy.py

Lines changed: 2 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -35,8 +35,9 @@
3535
from typing import Optional, Dict, Any, List, Set
3636
from enum import Enum
3737
import logging
38+
from praisonaiagents._logging import get_logger
3839

39-
logger = logging.getLogger(__name__)
40+
logger = get_logger(__name__)
4041

4142
# ============================================================================
4243
# Unified Stage Enum (G-DUP-1 fix: single source of truth)
@@ -72,7 +73,6 @@
7273
# Reverse map for converting autonomy signal strings back to EscalationSignal values
7374
_AUTONOMY_TO_ESCALATION_SIGNAL = {v: k for k, v in _ESCALATION_TO_AUTONOMY_SIGNAL.items()}
7475

75-
7676
# Valid autonomy modes
7777
VALID_AUTONOMY_MODES = {"caller", "iterative"}
7878

@@ -83,7 +83,6 @@
8383
"full_auto": "iterative",
8484
}
8585

86-
8786
@dataclass
8887
class AutonomyConfig:
8988
"""Configuration for Agent autonomy features.
@@ -196,7 +195,6 @@ def from_dict(cls, data: Dict[str, Any]) -> "AutonomyConfig":
196195
default_tools=data.get("default_tools"),
197196
)
198197

199-
200198
class AutonomySignal(str, Enum):
201199
"""Signals detected from prompts for autonomy decisions.
202200
@@ -225,7 +223,6 @@ def __init_subclass__(cls, **kwargs):
225223
)
226224
super().__init_subclass__(**kwargs)
227225

228-
229226
def _warn_autonomy_signal():
230227
"""Emit deprecation warning when AutonomySignal is accessed."""
231228
from ..utils.deprecation import warn_deprecated_param
@@ -237,7 +234,6 @@ def _warn_autonomy_signal():
237234
stacklevel=4
238235
)
239236

240-
241237
class AutonomyTrigger:
242238
"""Detects signals from prompts for autonomy decisions.
243239
@@ -283,7 +279,6 @@ def recommend_stage(self, signals: Set[str]) -> AutonomyStage:
283279
pass
284280
return self._delegate.recommend_stage(esc_signals)
285281

286-
287282
@dataclass
288283
class AutonomyResult:
289284
"""Result of an autonomous execution.
@@ -318,7 +313,6 @@ def __str__(self) -> str:
318313
"""
319314
return self.output or ""
320315

321-
322316
class DoomLoopTracker:
323317
"""Tracks actions to detect doom loops with graduated recovery.
324318
@@ -421,7 +415,6 @@ def reset(self) -> None:
421415
self._delegate.start_session()
422416
self._recovery_attempts = 0
423417

424-
425418
class AutonomyMixin:
426419
"""Helper-only trait for autonomy utilities.
427420

0 commit comments

Comments (0)