Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
130 changes: 113 additions & 17 deletions .github/workflows/pylint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,42 +8,138 @@ on:
workflow_dispatch:

jobs:
lint:
lint-and-test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
fail-fast: false
# No matrix strategy - only running on Python 3.10

steps:
- uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
python-version: "3.10"
cache: 'pip'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pylint pytest pytest-cov
pip install pylint pytest pytest-cov pytest-html bc ansi2html
# Install main package in development mode to ensure proper module imports
if [ -f movie_idea_generator/pyproject.toml ]; then
pip install -e ./movie_idea_generator[dev]
fi
if [ -f recommender_api/requirements.txt ]; then
pip install -r recommender_api/requirements.txt
fi
# Install any additional requirements
if [ -f requirements.txt ]; then
pip install -r requirements.txt
fi

- name: Analyzing code with pylint
run: |
# Exclude files containing secrets, virtual environments, and cache directories
pylint $(find . -type f -name "*.py" ! -path "*/\.*" ! -path "*/venv/*" ! -path "*/.venv/*" ! -path "*/secrets.py" ! -path "*/__pycache__/*")
continue-on-error: true # Don't fail CI/CD if linting issues are found

- name: Run pytest with coverage
run: |
if [ -d movie_idea_generator ]; then
cd movie_idea_generator && python -m pytest
# Set minimum score threshold (1.0/10)
THRESHOLD=1.0

# Set absolute minimum score threshold (1.0/10) - code will fail CI if below this score
CRITICAL_THRESHOLD=1.0

# Create a report file directory
mkdir -p pylint_reports

echo "::group::Finding Python files to analyze"
# Find Python files to analyze, excluding sensitive and cache directories
FILES_TO_CHECK=$(find . -type f -name "*.py" ! -path "*/\.*" ! -path "*/venv/*" ! -path "*/.venv/*" ! -path "*/secrets.py" ! -path "*/__pycache__/*")
echo "Files to check:"
echo "$FILES_TO_CHECK"
echo "::endgroup::"

echo "::group::Running pylint analysis"
# Run pylint with colorized output and capture exit code
pylint --output-format=colorized $FILES_TO_CHECK > pylint_reports/pylint_colorized.txt 2>&1 || true
PYLINT_EXIT_CODE=$?

# Also create a plain text version
pylint --output-format=text $FILES_TO_CHECK > pylint_reports/pylint_report.txt 2>&1 || true

# Convert colorized output to HTML for better viewing in artifacts
cat pylint_reports/pylint_colorized.txt | ansi2html > pylint_reports/pylint_report.html || true
echo "::endgroup::"

echo "::group::Pylint Results Summary"
# Display summary information
echo "----------------------------------------"
echo "🔍 PYLINT ANALYSIS RESULTS SUMMARY"
echo "----------------------------------------"

# Extract score from pylint output
SCORE=$(grep -oP "Your code has been rated at \K[0-9.]+" pylint_reports/pylint_report.txt || echo "")

# Get counts of different issue types
ERROR_COUNT=$(grep -c "E:" pylint_reports/pylint_report.txt || echo "0")
WARNING_COUNT=$(grep -c "W:" pylint_reports/pylint_report.txt || echo "0")
CONVENTION_COUNT=$(grep -c "C:" pylint_reports/pylint_report.txt || echo "0")
REFACTOR_COUNT=$(grep -c "R:" pylint_reports/pylint_report.txt || echo "0")

# Show summary
echo "Pylint exit code: $PYLINT_EXIT_CODE"
if [ -n "$SCORE" ]; then
echo "📊 Score: $SCORE/10.0"
else
echo "❌ Score: Not available (pylint might have failed)"
fi

echo "📋 Issue counts:"
echo " - Errors (E): $ERROR_COUNT"
echo " - Warnings (W): $WARNING_COUNT"
echo " - Conventions (C): $CONVENTION_COUNT"
echo " - Refactor suggestions (R): $REFACTOR_COUNT"
echo "----------------------------------------"
echo "::endgroup::"

echo "::group::Top Issues (if any)"
# Show the top issues from each category
if [ "$ERROR_COUNT" -gt "0" ]; then
echo "🚨 TOP ERRORS:"
grep "E:" pylint_reports/pylint_report.txt | head -n 5
echo ""
fi

if [ "$WARNING_COUNT" -gt "0" ]; then
echo "⚠️ TOP WARNINGS:"
grep "W:" pylint_reports/pylint_report.txt | head -n 5
echo ""
fi
continue-on-error: true

if [ "$CONVENTION_COUNT" -gt "0" ]; then
echo "📝 TOP CONVENTION ISSUES:"
grep "C:" pylint_reports/pylint_report.txt | head -n 5
echo ""
fi
echo "::endgroup::"

# Check if score exists and if it's below critical threshold
if [ -z "$SCORE" ]; then
echo "❌ Failed to extract pylint score. Pylint might have failed."
echo "Check the full output in the artifacts."
exit 1
fi

# Check if the score is below threshold (must-fix)
if (( $(echo "$SCORE < $THRESHOLD" | bc -l) )); then
echo "::error::❌ CRITICAL: Pylint score $SCORE is below the threshold of $THRESHOLD"
echo "::error::This code has critical quality issues that MUST be fixed before merging."
exit 1
else
echo "✅ Pylint score $SCORE meets or exceeds threshold $THRESHOLD"
fi

- name: Archive test results
uses: actions/upload-artifact@v4
with:
name: test-results
path: |
**/test_results/**/*
pylint_reports/*
if: always()
202 changes: 202 additions & 0 deletions movie_idea_generator/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
"""
Configuration file for pytest.
This file sets up the test environment, including mocking external dependencies.
"""

import sys
import os
from pathlib import Path
from unittest.mock import MagicMock

# Add the root directory to Python path
root_dir = Path(__file__).parent.absolute()
if str(root_dir) not in sys.path:
sys.path.insert(0, str(root_dir))

# Import stubs and set up mock modules
try:
from tests.stubs import LLM, Agent, BaseTool, Crew, Task, Response, load_dotenv, post

# Mock crewai module
class MockCrewAI:
"""Mock crewai module for testing."""
Agent = Agent
Crew = Crew
Task = Task
LLM = LLM

class tools:
"""Mock tools module."""
BaseTool = BaseTool

# Add mocks to sys.modules
sys.modules["crewai"] = MockCrewAI()
sys.modules["crewai.tools"] = MockCrewAI.tools

# Mock requests module if not present
if "requests" not in sys.modules:
class MockRequests:
"""Mock requests module."""
Response = Response
post = post

sys.modules["requests"] = MockRequests()

# Mock dotenv module if not present
if "dotenv" not in sys.modules:
class MockDotEnv:
"""Mock dotenv module."""
load_dotenv = load_dotenv

sys.modules["dotenv"] = MockDotEnv()

except ImportError as e:
print(f"Error setting up test environment: {e}")

# Set environment variable to indicate we're in test mode
os.environ["TESTING"] = "1"

# Set test mode environment variable
os.environ["MOVIE_IDEA_GENERATOR_TEST_MODE"] = "True"
os.environ["OPENAI_API_KEY"] = "test_key_for_pytest_12345"

# Create mock implementations for external dependencies
class MockOpenAIClient:
    """Mock implementation of the OpenAI client.

    Reproduces only the surface the code under test uses:
    ``client.chat.create(...)`` returning an object exposing
    ``choices[0].message.content``.
    """

    def __init__(self):
        """Initialize the mock client with its chat endpoint."""
        self.chat = self.ChatCompletion()

    class ChatCompletion:
        """Mock implementation of ChatCompletion."""

        def create(self, **kwargs):
            """Create a mock completion.

            Recognized kwargs:
                messages: list of {"role", "content"} dicts; the first
                    message with role "user" supplies the prompt.
                response_format: when its string form contains "json",
                    a canned JSON payload is chosen from the prompt text.

            Returns:
                A MagicMock shaped like an OpenAI completion response.
            """
            # NOTE: the unused 'model' kwarg lookup was removed
            # (pylint unused-variable).
            messages = kwargs.get("messages", [])
            response_format = kwargs.get("response_format", {})

            # Extract the prompt from the first user message.
            prompt = ""
            for message in messages:
                if message.get("role") == "user":
                    prompt = message.get("content", "")
                    break

            # Create mock response based on prompt and response_format.
            # "genre" is checked before "recommend", so it wins if both occur.
            if "json" in str(response_format):
                if "genre" in prompt.lower():
                    content = '{"genres": ["Sci-Fi", "Drama", "Comedy"]}'
                elif "recommend" in prompt.lower():
                    content = '''
                    {
                        "movie": {
                            "title": "Test Movie",
                            "creator": "Test Director",
                            "year": "2020",
                            "description": "A test movie about testing."
                        },
                        "book": {
                            "title": "Test Book",
                            "creator": "Test Author",
                            "year": "2010",
                            "description": "A test book about testing."
                        }
                    }
                    '''
                else:
                    content = '{"result": "test json result"}'
            else:
                content = "This is a mock response for testing purposes."

            # Mirror the attribute chain response.choices[0].message.content.
            return MagicMock(
                choices=[MagicMock(message=MagicMock(content=content))]
            )


# Mock the OpenAI client in llm.py
def mock_get_openai_client():
    """Factory standing in for the real client getter; yields a fresh mock."""
    fake_client = MockOpenAIClient()
    return fake_client


# Mock create_chat_completion function
def mock_create_chat_completion(messages, **kwargs):
    """Mock the create_chat_completion function.

    Args:
        messages: list of {"role", "content"} dicts; the first entry with
            role "user" supplies the prompt text.
        **kwargs: only ``response_format`` is consulted; when its string
            form contains "json", a canned JSON payload is chosen from
            the prompt text. (The unused 'model' lookup was removed --
            pylint unused-variable.)

    Returns:
        A MagicMock shaped like an OpenAI completion response, with the
        text available at ``result.choices[0].message.content``.
    """
    response_format = kwargs.get("response_format", {})

    # Extract the prompt from the messages
    prompt = ""
    for message in messages:
        if message.get("role") == "user":
            prompt = message.get("content", "")
            break

    # Create mock response based on prompt and response_format.
    # "genre" is checked before "recommend", so it wins if both occur.
    if "json" in str(response_format):
        if "genre" in prompt.lower():
            content = '{"genres": ["Sci-Fi", "Drama", "Comedy"]}'
        elif "recommend" in prompt.lower():
            content = '''
            {
                "movie": {
                    "title": "Test Movie",
                    "creator": "Test Director",
                    "year": "2020",
                    "description": "A test movie about testing."
                },
                "book": {
                    "title": "Test Book",
                    "creator": "Test Author",
                    "year": "2010",
                    "description": "A test book about testing."
                }
            }
            '''
        else:
            content = '{"result": "test json result"}'
    else:
        content = "This is a mock response for testing purposes."

    return MagicMock(
        choices=[MagicMock(message=MagicMock(content=content))]
    )


# Mock modules
sys.modules["openai"] = MagicMock()

# Patch the get_openai_client and create_chat_completion functions
try:
    import src.config.llm
    # Monkey-patch the module-level factory and helper so application code
    # that calls them at runtime receives the test doubles defined above.
    src.config.llm.get_openai_client = mock_get_openai_client
    src.config.llm.create_chat_completion = mock_create_chat_completion

    # Also provide a mock LLM class
    class MockLLM:
        """Mock LLM class for testing."""

        def __init__(self, **kwargs):
            """Initialize the mock LLM.

            Recognized kwargs (all optional): name, model, temperature.
            """
            self.name = kwargs.get("name", "MockLLM")
            self.model = kwargs.get("model", "gpt-3.5-turbo")
            self.temperature = kwargs.get("temperature", 0.7)

    src.config.llm.LLM = MockLLM

except ImportError:
    # Non-fatal: lets collection proceed when src.config.llm is absent.
    print("Warning: Unable to patch src.config.llm. Tests might fail.")
12 changes: 10 additions & 2 deletions movie_idea_generator/run_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Get the directory of the script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Check if pytest is installed
if ! command -v pytest &> /dev/null; then
echo -e "${RED}Error: pytest is not installed. Please install it first:${NC}"
Expand Down Expand Up @@ -48,6 +51,9 @@ done
echo -e "${YELLOW}Running tests for Movie Idea Generator${NC}"
echo -e "${YELLOW}------------------------------------${NC}"

# Make sure we're in the right directory
cd "$SCRIPT_DIR"

# Build the pytest command
cmd="pytest -v"

Expand All @@ -68,9 +74,11 @@ echo -e "${YELLOW}Running command: ${cmd}${NC}"
$cmd

# Check the exit status
if [ $? -eq 0 ]; then
status=$?
if [ $status -eq 0 ]; then
echo -e "${GREEN}All tests passed!${NC}"
exit 0
else
echo -e "${RED}Some tests failed.${NC}"
exit 1
exit $status
fi
Loading