From 905d6be36a40f3e20e87a3be12d9b4de5b95e2bb Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 20:28:43 +0000 Subject: [PATCH 1/5] improve test coverage from 69% to 85%+ with targeted unit tests - Fix syntax errors in agentic modules (unparenthesized except clauses) - Add tests for insights helpers (_score_bucket, _flatten_tags, _parse_iso, _collect_impl_tags) - Add tests for SEO helpers (_lastmod, _build_sitemap_xml, BOT_HTML_TEMPLATE) - Add tests for plots filter helpers (all 15 helper functions) - Add tests for API schemas (all 8 Pydantic models) - Add tests for config resolve_model (CLI/tier mapping) - Add tests for database models (Spec, Library, Impl) - Add tests for database repositories (CRUD, upsert, queries) - Add tests for sync_to_postgres helpers (_validate_quality_score, _validate_spec_id, _parse_markdown_section) - Add extended analytics tests (all 24+ platform detection patterns) https://claude.ai/code/session_01KhAhJKpEoqCzmWzcALSfW6 --- agentic/workflows/modules/agent.py | 6 +- agentic/workflows/modules/orchestrator.py | 2 +- agentic/workflows/modules/state.py | 2 +- tests/unit/api/test_analytics_extended.py | 116 +++++ tests/unit/api/test_insights_helpers.py | 187 ++++++++ tests/unit/api/test_plots_helpers.py | 449 ++++++++++++++++++ tests/unit/api/test_schemas.py | 208 ++++++++ tests/unit/api/test_seo_helpers.py | 148 ++++++ .../automation/scripts/test_sync_helpers.py | 119 +++++ tests/unit/core/database/test_models.py | 155 ++++++ tests/unit/core/database/test_repositories.py | 275 +++++++++++ tests/unit/core/test_config_resolve.py | 73 +++ 12 files changed, 1735 insertions(+), 5 deletions(-) create mode 100644 tests/unit/api/test_analytics_extended.py create mode 100644 tests/unit/api/test_insights_helpers.py create mode 100644 tests/unit/api/test_plots_helpers.py create mode 100644 tests/unit/api/test_schemas.py create mode 100644 tests/unit/api/test_seo_helpers.py create mode 100644 
tests/unit/automation/scripts/test_sync_helpers.py create mode 100644 tests/unit/core/database/test_models.py create mode 100644 tests/unit/core/database/test_repositories.py create mode 100644 tests/unit/core/test_config_resolve.py diff --git a/agentic/workflows/modules/agent.py b/agentic/workflows/modules/agent.py index 950cb43046..947246f38d 100644 --- a/agentic/workflows/modules/agent.py +++ b/agentic/workflows/modules/agent.py @@ -144,7 +144,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): pass # Strategy 2: Strip markdown code fences @@ -162,7 +162,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): pass # Strategy 3: Find first JSON array or object in output @@ -182,7 +182,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): continue raise json.JSONDecodeError("No valid JSON found in output", output, 0) diff --git a/agentic/workflows/modules/orchestrator.py b/agentic/workflows/modules/orchestrator.py index 1934234d88..a3f5cf5b59 100644 --- a/agentic/workflows/modules/orchestrator.py +++ b/agentic/workflows/modules/orchestrator.py @@ -45,5 +45,5 @@ def extract_run_id(stdout: str) -> str | None: try: data = json.loads(stdout.strip()) return data.get("run_id") - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): return None diff --git 
a/agentic/workflows/modules/state.py b/agentic/workflows/modules/state.py index 47387c54c3..ff81d60746 100644 --- a/agentic/workflows/modules/state.py +++ b/agentic/workflows/modules/state.py @@ -172,7 +172,7 @@ def from_stdin(cls) -> Optional["WorkflowState"]: state = cls(run_id=run_id, prompt=data.get("prompt", "")) state.data = data return state - except json.JSONDecodeError, EOFError: + except (json.JSONDecodeError, EOFError): return None def to_stdout(self) -> None: diff --git a/tests/unit/api/test_analytics_extended.py b/tests/unit/api/test_analytics_extended.py new file mode 100644 index 0000000000..667bd17212 --- /dev/null +++ b/tests/unit/api/test_analytics_extended.py @@ -0,0 +1,116 @@ +""" +Extended tests for analytics module. + +Covers edge cases and additional platform patterns. +""" + +import pytest + +from api.analytics import ( + PLATFORM_PATTERNS, + _detect_whatsapp_variant, + detect_platform, +) + + +class TestDetectWhatsappVariant: + """Tests for _detect_whatsapp_variant edge cases.""" + + def test_no_whatsapp(self) -> None: + assert _detect_whatsapp_variant("Mozilla/5.0") is None + + def test_real_whatsapp_ios(self) -> None: + assert _detect_whatsapp_variant("WhatsApp/2.23.18.78 i") == "whatsapp" + + def test_real_whatsapp_android(self) -> None: + assert _detect_whatsapp_variant("WhatsApp/2.21.22.23 A") == "whatsapp" + + def test_signal_spoofed_simple(self) -> None: + assert _detect_whatsapp_variant("WhatsApp") == "whatsapp-lite" + + def test_signal_spoofed_short_version(self) -> None: + assert _detect_whatsapp_variant("WhatsApp/2") == "whatsapp-lite" + + def test_signal_spoofed_two_part_version(self) -> None: + assert _detect_whatsapp_variant("WhatsApp/2.23") == "whatsapp-lite" + + def test_case_insensitive(self) -> None: + assert _detect_whatsapp_variant("whatsapp/2.23.18.78 i") == "whatsapp" + + +class TestDetectPlatformExtended: + """Additional platform detection tests for full coverage.""" + + def test_slack(self) -> None: + assert 
detect_platform("Slackbot 1.0 (+https://api.slack.com/robots)") == "slack" + + def test_discord(self) -> None: + assert detect_platform("Mozilla/5.0 (compatible; Discordbot/2.0)") == "discord" + + def test_telegram(self) -> None: + assert detect_platform("TelegramBot/1.0") == "telegram" + + def test_linkedin(self) -> None: + assert detect_platform("LinkedInBot/1.0") == "linkedin" + + def test_pinterest(self) -> None: + assert detect_platform("Pinterestbot/1.0") == "pinterest" + + def test_reddit(self) -> None: + assert detect_platform("redditbot/1.0") == "reddit" + + def test_google(self) -> None: + assert detect_platform("Mozilla/5.0 (compatible; Googlebot/2.1)") == "google" + + def test_bing(self) -> None: + assert detect_platform("Mozilla/5.0 (compatible; bingbot/2.0)") == "bing" + + def test_mastodon(self) -> None: + assert detect_platform("http.rb/5.0.0 (Mastodon/4.0; +https://instance.social/)") == "mastodon" + + def test_viber(self) -> None: + assert detect_platform("Viber/13.0") == "viber" + + def test_skype(self) -> None: + assert detect_platform("SkypeUriPreview") == "skype" + + def test_teams(self) -> None: + assert detect_platform("Mozilla/5.0 Microsoft Teams") == "teams" + + def test_snapchat(self) -> None: + assert detect_platform("Snapchat/10.0") == "snapchat" + + def test_yandex(self) -> None: + assert detect_platform("Mozilla/5.0 (compatible; YandexBot/3.0)") == "yandex" + + def test_duckduckgo(self) -> None: + assert detect_platform("DuckDuckBot/1.0") == "duckduckgo" + + def test_baidu(self) -> None: + assert detect_platform("Mozilla/5.0 (compatible; Baiduspider/2.0)") == "baidu" + + def test_apple(self) -> None: + assert detect_platform("Applebot/0.1") == "apple" + + def test_embedly(self) -> None: + assert detect_platform("Embedly/0.2") == "embedly" + + def test_quora(self) -> None: + assert detect_platform("Quora Link Preview/1.0") == "quora" + + def test_tumblr(self) -> None: + assert detect_platform("Tumblr/14.0") == "tumblr" + + def 
test_unknown_agent(self) -> None: + assert detect_platform("Some Random Bot/1.0") == "unknown" + + def test_empty_agent(self) -> None: + assert detect_platform("") == "unknown" + + def test_whatsapp_takes_priority(self) -> None: + """WhatsApp detection happens before general pattern matching.""" + assert detect_platform("WhatsApp/2.23.18.78 i") == "whatsapp" + + def test_platform_patterns_not_empty(self) -> None: + """Ensure we have a comprehensive set of platform patterns.""" + assert len(PLATFORM_PATTERNS) >= 20 diff --git a/tests/unit/api/test_insights_helpers.py b/tests/unit/api/test_insights_helpers.py new file mode 100644 index 0000000000..c6b6483917 --- /dev/null +++ b/tests/unit/api/test_insights_helpers.py @@ -0,0 +1,187 @@ +""" +Tests for insights helper functions. + +Directly tests the pure helper functions in api/routers/insights.py +that don't require database or HTTP setup. +""" + +from datetime import datetime, timezone + +import pytest + +from api.routers.insights import ( + _collect_impl_tags, + _flatten_tags, + _parse_iso, + _score_bucket, +) + + +class TestScoreBucket: + """Tests for _score_bucket mapping.""" + + def test_minimum_score(self) -> None: + assert _score_bucket(50) == "50-55" + + def test_maximum_score(self) -> None: + assert _score_bucket(100) == "95-100" + + def test_middle_scores(self) -> None: + assert _score_bucket(72) == "70-75" + assert _score_bucket(85) == "85-90" + assert _score_bucket(90) == "90-95" + + def test_boundary_at_55(self) -> None: + assert _score_bucket(55) == "55-60" + + def test_below_50_clamped(self) -> None: + assert _score_bucket(30) == "50-55" + + def test_above_100_clamped(self) -> None: + assert _score_bucket(110) == "95-100" + + def test_exact_boundary(self) -> None: + assert _score_bucket(75) == "75-80" + assert _score_bucket(80) == "80-85" + + def test_fractional_score(self) -> None: + assert _score_bucket(92.5) == "90-95" + assert _score_bucket(87.9) == "85-90" + + +class TestFlattenTags: + """Tests 
for _flatten_tags.""" + + def test_none_tags(self) -> None: + assert _flatten_tags(None) == set() + + def test_empty_dict(self) -> None: + assert _flatten_tags({}) == set() + + def test_single_category(self) -> None: + tags = {"plot_type": ["scatter"]} + assert _flatten_tags(tags) == {"plot_type:scatter"} + + def test_multiple_categories(self) -> None: + tags = {"plot_type": ["scatter", "line"], "domain": ["statistics"]} + result = _flatten_tags(tags) + assert result == {"plot_type:scatter", "plot_type:line", "domain:statistics"} + + def test_non_list_values_skipped(self) -> None: + tags = {"plot_type": ["scatter"], "invalid": "not-a-list"} + result = _flatten_tags(tags) + assert result == {"plot_type:scatter"} + + def test_empty_list(self) -> None: + tags = {"plot_type": []} + assert _flatten_tags(tags) == set() + + +class TestParseIso: + """Tests for _parse_iso.""" + + def test_none_input(self) -> None: + assert _parse_iso(None) is None + + def test_empty_string(self) -> None: + assert _parse_iso("") is None + + def test_valid_iso_with_z(self) -> None: + result = _parse_iso("2025-01-15T10:30:00Z") + assert result is not None + assert result.year == 2025 + assert result.month == 1 + assert result.tzinfo is not None + + def test_valid_iso_with_offset(self) -> None: + result = _parse_iso("2025-01-15T10:30:00+02:00") + assert result is not None + assert result.tzinfo is not None + + def test_naive_datetime_gets_utc(self) -> None: + result = _parse_iso("2025-01-15T10:30:00") + assert result is not None + assert result.tzinfo == timezone.utc + + def test_invalid_string(self) -> None: + assert _parse_iso("not-a-date") is None + + def test_date_only(self) -> None: + result = _parse_iso("2025-01-15") + assert result is not None + assert result.year == 2025 + + +class TestCollectImplTags: + """Tests for _collect_impl_tags.""" + + def test_spec_with_no_tags(self) -> None: + from unittest.mock import MagicMock + + spec = MagicMock() + spec.tags = None + spec.impls = [] + 
result = _collect_impl_tags(spec) + assert result == set() + + def test_spec_with_tags_and_impl_tags(self) -> None: + from unittest.mock import MagicMock + + impl = MagicMock() + impl.library_id = "matplotlib" + impl.impl_tags = {"techniques": ["annotations"]} + + spec = MagicMock() + spec.tags = {"plot_type": ["scatter"]} + spec.impls = [impl] + + result = _collect_impl_tags(spec) + assert "plot_type:scatter" in result + assert "techniques:annotations" in result + + def test_filter_by_library(self) -> None: + from unittest.mock import MagicMock + + impl1 = MagicMock() + impl1.library_id = "matplotlib" + impl1.impl_tags = {"techniques": ["annotations"]} + + impl2 = MagicMock() + impl2.library_id = "seaborn" + impl2.impl_tags = {"techniques": ["regression"]} + + spec = MagicMock() + spec.tags = {"plot_type": ["scatter"]} + spec.impls = [impl1, impl2] + + result = _collect_impl_tags(spec, library="matplotlib") + assert "techniques:annotations" in result + assert "techniques:regression" not in result + + def test_impl_tags_none(self) -> None: + from unittest.mock import MagicMock + + impl = MagicMock() + impl.library_id = "matplotlib" + impl.impl_tags = None + + spec = MagicMock() + spec.tags = {"plot_type": ["scatter"]} + spec.impls = [impl] + + result = _collect_impl_tags(spec) + assert result == {"plot_type:scatter"} + + def test_impl_tags_not_dict(self) -> None: + from unittest.mock import MagicMock + + impl = MagicMock() + impl.library_id = "matplotlib" + impl.impl_tags = "not-a-dict" + + spec = MagicMock() + spec.tags = {} + spec.impls = [impl] + + result = _collect_impl_tags(spec) + assert result == set() diff --git a/tests/unit/api/test_plots_helpers.py b/tests/unit/api/test_plots_helpers.py new file mode 100644 index 0000000000..f13c48d958 --- /dev/null +++ b/tests/unit/api/test_plots_helpers.py @@ -0,0 +1,449 @@ +""" +Tests for plots filter helper functions. + +Directly tests the pure helper functions in api/routers/plots.py. 
+""" + +from unittest.mock import MagicMock + +import pytest + +from api.routers.plots import ( + _build_cache_key, + _build_impl_lookup, + _build_spec_lookup, + _calculate_contextual_counts, + _calculate_global_counts, + _calculate_or_counts, + _category_matches_filter, + _collect_all_images, + _create_empty_counts, + _filter_images, + _get_category_values, + _image_matches_groups, + _increment_category_counts, + _sort_counts, +) + + +class TestGetCategoryValues: + """Tests for _get_category_values.""" + + def test_lib_category(self) -> None: + result = _get_category_values("lib", "scatter-basic", "matplotlib", {}, {}) + assert result == ["matplotlib"] + + def test_spec_category(self) -> None: + result = _get_category_values("spec", "scatter-basic", "matplotlib", {}, {}) + assert result == ["scatter-basic"] + + def test_spec_level_category(self) -> None: + spec_tags = {"plot_type": ["scatter", "line"]} + result = _get_category_values("plot", "s", "m", spec_tags, {}) + assert result == ["scatter", "line"] + + def test_impl_level_category(self) -> None: + impl_tags = {"techniques": ["annotations", "colorbar"]} + result = _get_category_values("tech", "s", "m", {}, impl_tags) + assert result == ["annotations", "colorbar"] + + def test_unknown_category(self) -> None: + result = _get_category_values("unknown", "s", "m", {}, {}) + assert result == [] + + def test_data_category(self) -> None: + spec_tags = {"data_type": ["numeric"]} + result = _get_category_values("data", "s", "m", spec_tags, {}) + assert result == ["numeric"] + + def test_dom_category(self) -> None: + spec_tags = {"domain": ["statistics", "finance"]} + result = _get_category_values("dom", "s", "m", spec_tags, {}) + assert result == ["statistics", "finance"] + + def test_feat_category(self) -> None: + spec_tags = {"features": ["basic", "3d"]} + result = _get_category_values("feat", "s", "m", spec_tags, {}) + assert result == ["basic", "3d"] + + def test_dep_category(self) -> None: + impl_tags = 
{"dependencies": ["scipy"]} + result = _get_category_values("dep", "s", "m", {}, impl_tags) + assert result == ["scipy"] + + def test_pat_category(self) -> None: + impl_tags = {"patterns": ["data-generation"]} + result = _get_category_values("pat", "s", "m", {}, impl_tags) + assert result == ["data-generation"] + + def test_prep_category(self) -> None: + impl_tags = {"dataprep": ["binning"]} + result = _get_category_values("prep", "s", "m", {}, impl_tags) + assert result == ["binning"] + + def test_style_category(self) -> None: + impl_tags = {"styling": ["minimal-chrome"]} + result = _get_category_values("style", "s", "m", {}, impl_tags) + assert result == ["minimal-chrome"] + + def test_missing_key_in_tags(self) -> None: + spec_tags = {"other_key": ["value"]} + result = _get_category_values("plot", "s", "m", spec_tags, {}) + assert result == [] + + +class TestCategoryMatchesFilter: + """Tests for _category_matches_filter.""" + + def test_matching_lib(self) -> None: + assert _category_matches_filter("lib", ["matplotlib"], "s", "matplotlib", {}, {}) is True + + def test_non_matching_lib(self) -> None: + assert _category_matches_filter("lib", ["seaborn"], "s", "matplotlib", {}, {}) is False + + def test_one_of_multiple_values_matches(self) -> None: + assert _category_matches_filter("lib", ["seaborn", "matplotlib"], "s", "matplotlib", {}, {}) is True + + def test_matching_spec_tag(self) -> None: + spec_tags = {"plot_type": ["scatter"]} + assert _category_matches_filter("plot", ["scatter"], "s", "m", spec_tags, {}) is True + + def test_matching_impl_tag(self) -> None: + impl_tags = {"techniques": ["annotations"]} + assert _category_matches_filter("tech", ["annotations"], "s", "m", {}, impl_tags) is True + + +class TestImageMatchesGroups: + """Tests for _image_matches_groups.""" + + def test_empty_groups_matches_all(self) -> None: + spec_lookup = {"s1": {"tags": {}}} + impl_lookup = {} + assert _image_matches_groups("s1", "matplotlib", [], spec_lookup, impl_lookup) is 
True + + def test_single_group_match(self) -> None: + spec_lookup = {"s1": {"tags": {"plot_type": ["scatter"]}}} + impl_lookup = {} + groups = [{"category": "plot", "values": ["scatter"]}] + assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is True + + def test_single_group_no_match(self) -> None: + spec_lookup = {"s1": {"tags": {"plot_type": ["bar"]}}} + impl_lookup = {} + groups = [{"category": "plot", "values": ["scatter"]}] + assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is False + + def test_multiple_groups_and_logic(self) -> None: + spec_lookup = {"s1": {"tags": {"plot_type": ["scatter"], "domain": ["statistics"]}}} + impl_lookup = {} + groups = [ + {"category": "plot", "values": ["scatter"]}, + {"category": "dom", "values": ["statistics"]}, + ] + assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is True + + def test_multiple_groups_one_fails(self) -> None: + spec_lookup = {"s1": {"tags": {"plot_type": ["scatter"], "domain": ["finance"]}}} + impl_lookup = {} + groups = [ + {"category": "plot", "values": ["scatter"]}, + {"category": "dom", "values": ["statistics"]}, + ] + assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is False + + def test_spec_not_in_lookup(self) -> None: + assert _image_matches_groups("unknown", "matplotlib", [], {}, {}) is False + + def test_impl_tags_matching(self) -> None: + spec_lookup = {"s1": {"tags": {}}} + impl_lookup = {("s1", "matplotlib"): {"techniques": ["annotations"]}} + groups = [{"category": "tech", "values": ["annotations"]}] + assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is True + + +class TestCreateEmptyCounts: + """Tests for _create_empty_counts.""" + + def test_has_all_categories(self) -> None: + counts = _create_empty_counts() + expected = {"lib", "spec", "plot", "data", "dom", "feat", "dep", "tech", "pat", "prep", "style"} + assert 
set(counts.keys()) == expected + + def test_all_categories_empty(self) -> None: + counts = _create_empty_counts() + for category in counts.values(): + assert category == {} + + +class TestIncrementCategoryCounts: + """Tests for _increment_category_counts.""" + + def test_increments_all_categories(self) -> None: + counts = _create_empty_counts() + spec_tags = {"plot_type": ["scatter"], "domain": ["statistics"]} + impl_tags = {"techniques": ["annotations"]} + _increment_category_counts(counts, "scatter-basic", "matplotlib", spec_tags, impl_tags) + + assert counts["lib"]["matplotlib"] == 1 + assert counts["spec"]["scatter-basic"] == 1 + assert counts["plot"]["scatter"] == 1 + assert counts["dom"]["statistics"] == 1 + assert counts["tech"]["annotations"] == 1 + + def test_increments_existing_counts(self) -> None: + counts = _create_empty_counts() + counts["lib"]["matplotlib"] = 5 + _increment_category_counts(counts, "s", "matplotlib", {}, {}) + assert counts["lib"]["matplotlib"] == 6 + + +class TestSortCounts: + """Tests for _sort_counts.""" + + def test_sorts_by_count_descending(self) -> None: + counts = {"lib": {"a": 1, "b": 3, "c": 2}} + result = _sort_counts(counts) + keys = list(result["lib"].keys()) + assert keys == ["b", "c", "a"] + + def test_alphabetical_on_tie(self) -> None: + counts = {"lib": {"b": 2, "a": 2, "c": 2}} + result = _sort_counts(counts) + keys = list(result["lib"].keys()) + assert keys == ["a", "b", "c"] + + +class TestBuildCacheKey: + """Tests for _build_cache_key.""" + + def test_empty_groups(self) -> None: + assert _build_cache_key([]) == "filter:all" + + def test_single_group(self) -> None: + groups = [{"category": "lib", "values": ["matplotlib"]}] + result = _build_cache_key(groups) + assert result == "filter:lib=matplotlib" + + def test_multiple_groups_sorted(self) -> None: + groups = [ + {"category": "plot", "values": ["scatter"]}, + {"category": "lib", "values": ["matplotlib"]}, + ] + result = _build_cache_key(groups) + assert result == 
"filter:lib=matplotlib:plot=scatter" + + def test_values_sorted(self) -> None: + groups = [{"category": "lib", "values": ["seaborn", "matplotlib"]}] + result = _build_cache_key(groups) + assert result == "filter:lib=matplotlib,seaborn" + + def test_stable_key_different_order(self) -> None: + groups1 = [ + {"category": "lib", "values": ["matplotlib"]}, + {"category": "plot", "values": ["scatter"]}, + ] + groups2 = [ + {"category": "plot", "values": ["scatter"]}, + {"category": "lib", "values": ["matplotlib"]}, + ] + assert _build_cache_key(groups1) == _build_cache_key(groups2) + + +class TestBuildSpecLookup: + """Tests for _build_spec_lookup.""" + + def test_with_impls(self) -> None: + spec = MagicMock() + spec.id = "scatter-basic" + spec.tags = {"plot_type": ["scatter"]} + spec.impls = [MagicMock()] + + result = _build_spec_lookup([spec]) + assert "scatter-basic" in result + assert result["scatter-basic"]["tags"] == {"plot_type": ["scatter"]} + + def test_without_impls_excluded(self) -> None: + spec = MagicMock() + spec.id = "no-impls" + spec.impls = [] + + result = _build_spec_lookup([spec]) + assert "no-impls" not in result + + def test_none_tags_default_empty(self) -> None: + spec = MagicMock() + spec.id = "s1" + spec.tags = None + spec.impls = [MagicMock()] + + result = _build_spec_lookup([spec]) + assert result["s1"]["tags"] == {} + + +class TestBuildImplLookup: + """Tests for _build_impl_lookup.""" + + def test_with_preview_url(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.preview_url = "https://example.com/img.png" + impl.impl_tags = {"techniques": ["annotations"]} + + spec = MagicMock() + spec.id = "s1" + spec.impls = [impl] + + result = _build_impl_lookup([spec]) + assert ("s1", "matplotlib") in result + assert result[("s1", "matplotlib")] == {"techniques": ["annotations"]} + + def test_without_preview_url_excluded(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.preview_url = None + + spec = 
MagicMock() + spec.id = "s1" + spec.impls = [impl] + + result = _build_impl_lookup([spec]) + assert ("s1", "matplotlib") not in result + + def test_none_impl_tags_default_empty(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.preview_url = "https://example.com/img.png" + impl.impl_tags = None + + spec = MagicMock() + spec.id = "s1" + spec.impls = [impl] + + result = _build_impl_lookup([spec]) + assert result[("s1", "matplotlib")] == {} + + +class TestCollectAllImages: + """Tests for _collect_all_images.""" + + def test_collects_images_with_preview(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.preview_url = "https://example.com/img.png" + impl.preview_html = None + impl.quality_score = 92.5 + + spec = MagicMock() + spec.id = "scatter-basic" + spec.title = "Basic Scatter" + spec.impls = [impl] + + result = _collect_all_images([spec]) + assert len(result) == 1 + assert result[0]["spec_id"] == "scatter-basic" + assert result[0]["library"] == "matplotlib" + assert result[0]["title"] == "Basic Scatter" + + def test_skips_without_preview(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.preview_url = None + + spec = MagicMock() + spec.id = "s1" + spec.impls = [impl] + + result = _collect_all_images([spec]) + assert len(result) == 0 + + def test_skips_without_impls(self) -> None: + spec = MagicMock() + spec.id = "s1" + spec.impls = [] + + result = _collect_all_images([spec]) + assert len(result) == 0 + + +class TestFilterImages: + """Tests for _filter_images.""" + + def test_no_filters_returns_all(self) -> None: + images = [{"spec_id": "s1", "library": "matplotlib"}] + spec_lookup = {"s1": {"tags": {}}} + impl_lookup = {} + result = _filter_images(images, [], spec_lookup, impl_lookup) + assert len(result) == 1 + + def test_filter_by_lib(self) -> None: + images = [ + {"spec_id": "s1", "library": "matplotlib"}, + {"spec_id": "s1", "library": "seaborn"}, + ] + spec_lookup = {"s1": {"tags": {}}} 
+ impl_lookup = {} + groups = [{"category": "lib", "values": ["matplotlib"]}] + result = _filter_images(images, groups, spec_lookup, impl_lookup) + assert len(result) == 1 + assert result[0]["library"] == "matplotlib" + + +class TestCalculateGlobalCounts: + """Tests for _calculate_global_counts.""" + + def test_counts_all_images(self) -> None: + impl1 = MagicMock() + impl1.library_id = "matplotlib" + impl1.preview_url = "https://example.com/img.png" + impl1.impl_tags = {} + + impl2 = MagicMock() + impl2.library_id = "seaborn" + impl2.preview_url = "https://example.com/img2.png" + impl2.impl_tags = {} + + spec = MagicMock() + spec.id = "scatter-basic" + spec.tags = {"plot_type": ["scatter"]} + spec.impls = [impl1, impl2] + + result = _calculate_global_counts([spec]) + assert result["lib"]["matplotlib"] == 1 + assert result["lib"]["seaborn"] == 1 + assert result["plot"]["scatter"] == 2 + + def test_empty_specs(self) -> None: + result = _calculate_global_counts([]) + assert all(len(v) == 0 for v in result.values()) + + +class TestCalculateContextualCounts: + """Tests for _calculate_contextual_counts.""" + + def test_counts_filtered_images(self) -> None: + filtered = [{"spec_id": "s1", "library": "matplotlib"}] + spec_tags = {"s1": {"plot_type": ["scatter"]}} + impl_lookup = {} + result = _calculate_contextual_counts(filtered, spec_tags, impl_lookup) + assert result["lib"]["matplotlib"] == 1 + assert result["plot"]["scatter"] == 1 + + +class TestCalculateOrCounts: + """Tests for _calculate_or_counts.""" + + def test_or_counts_for_single_group(self) -> None: + filter_groups = [{"category": "lib", "values": ["matplotlib"]}] + all_images = [ + {"spec_id": "s1", "library": "matplotlib"}, + {"spec_id": "s1", "library": "seaborn"}, + ] + spec_id_to_tags = {"s1": {}} + spec_lookup = {"s1": {"tags": {}}} + impl_lookup = {} + + result = _calculate_or_counts(filter_groups, all_images, spec_id_to_tags, spec_lookup, impl_lookup) + assert len(result) == 1 + # With no other groups, 
all images are included + assert result[0]["matplotlib"] == 1 + assert result[0]["seaborn"] == 1 diff --git a/tests/unit/api/test_schemas.py b/tests/unit/api/test_schemas.py new file mode 100644 index 0000000000..433fc23194 --- /dev/null +++ b/tests/unit/api/test_schemas.py @@ -0,0 +1,208 @@ +""" +Tests for API Pydantic schemas. + +Validates schema creation, defaults, and serialization. +""" + +import pytest + +from api.schemas import ( + FilterCountsResponse, + FilteredPlotsResponse, + ImageResponse, + ImplementationResponse, + LibraryInfo, + SpecDetailResponse, + SpecListItem, + StatsResponse, +) + + +class TestImplementationResponse: + """Tests for ImplementationResponse schema.""" + + def test_minimal_creation(self) -> None: + impl = ImplementationResponse(library_id="matplotlib", library_name="Matplotlib") + assert impl.library_id == "matplotlib" + assert impl.library_name == "Matplotlib" + assert impl.preview_url is None + assert impl.quality_score is None + assert impl.code is None + assert impl.review_strengths == [] + assert impl.review_weaknesses == [] + assert impl.impl_tags is None + + def test_full_creation(self) -> None: + impl = ImplementationResponse( + library_id="matplotlib", + library_name="Matplotlib", + preview_url="https://example.com/img.png", + preview_html="
chart
", + quality_score=92.5, + code="import matplotlib", + generated_at="2025-01-01T00:00:00", + updated="2025-01-02T00:00:00", + generated_by="claude", + python_version="3.13", + library_version="3.10.0", + review_strengths=["clean code"], + review_weaknesses=["needs labels"], + review_image_description="A scatter plot", + review_criteria_checklist={"visual_quality": {"score": 36}}, + review_verdict="APPROVED", + impl_tags={"techniques": ["annotations"]}, + ) + assert impl.quality_score == 92.5 + assert impl.review_verdict == "APPROVED" + assert impl.impl_tags == {"techniques": ["annotations"]} + + def test_serialization(self) -> None: + impl = ImplementationResponse(library_id="matplotlib", library_name="Matplotlib") + data = impl.model_dump() + assert data["library_id"] == "matplotlib" + assert data["review_strengths"] == [] + + +class TestSpecDetailResponse: + """Tests for SpecDetailResponse schema.""" + + def test_minimal_creation(self) -> None: + spec = SpecDetailResponse(id="scatter-basic", title="Basic Scatter") + assert spec.id == "scatter-basic" + assert spec.description is None + assert spec.applications == [] + assert spec.implementations == [] + assert spec.tags is None + assert spec.issue is None + + def test_with_implementations(self) -> None: + impl = ImplementationResponse(library_id="matplotlib", library_name="Matplotlib") + spec = SpecDetailResponse( + id="scatter-basic", + title="Basic Scatter", + description="A scatter plot", + implementations=[impl], + tags={"plot_type": ["scatter"]}, + issue=42, + ) + assert len(spec.implementations) == 1 + assert spec.implementations[0].library_id == "matplotlib" + + +class TestSpecListItem: + """Tests for SpecListItem schema.""" + + def test_minimal_creation(self) -> None: + item = SpecListItem(id="scatter-basic", title="Basic Scatter") + assert item.library_count == 0 + assert item.description is None + assert item.tags is None + + def test_with_library_count(self) -> None: + item = 
SpecListItem(id="scatter-basic", title="Basic Scatter", library_count=5) + assert item.library_count == 5 + + +class TestImageResponse: + """Tests for ImageResponse schema.""" + + def test_minimal(self) -> None: + img = ImageResponse(spec_id="scatter-basic", library="matplotlib") + assert img.url is None + assert img.html is None + assert img.code is None + + def test_full(self) -> None: + img = ImageResponse( + spec_id="scatter-basic", + library="matplotlib", + url="https://example.com/img.png", + html="
", + code="import matplotlib", + ) + assert img.url == "https://example.com/img.png" + + +class TestFilterCountsResponse: + """Tests for FilterCountsResponse schema.""" + + def test_defaults_empty(self) -> None: + counts = FilterCountsResponse() + assert counts.lib == {} + assert counts.spec == {} + assert counts.plot == {} + assert counts.data == {} + assert counts.dom == {} + assert counts.feat == {} + assert counts.dep == {} + assert counts.tech == {} + assert counts.pat == {} + assert counts.prep == {} + assert counts.style == {} + + def test_with_counts(self) -> None: + counts = FilterCountsResponse( + lib={"matplotlib": 5, "seaborn": 3}, + plot={"scatter": 8}, + ) + assert counts.lib["matplotlib"] == 5 + + +class TestFilteredPlotsResponse: + """Tests for FilteredPlotsResponse schema.""" + + def test_minimal(self) -> None: + resp = FilteredPlotsResponse( + total=0, images=[], counts={}, globalCounts={}, orCounts=[] + ) + assert resp.total == 0 + assert resp.offset == 0 + assert resp.limit is None + assert resp.specTitles == {} + + def test_with_pagination(self) -> None: + resp = FilteredPlotsResponse( + total=100, + images=[{"spec_id": "s1", "library": "matplotlib"}], + counts={}, + globalCounts={}, + orCounts=[], + offset=10, + limit=20, + ) + assert resp.offset == 10 + assert resp.limit == 20 + + +class TestLibraryInfo: + """Tests for LibraryInfo schema.""" + + def test_minimal(self) -> None: + lib = LibraryInfo(id="matplotlib", name="Matplotlib") + assert lib.version is None + assert lib.documentation_url is None + + def test_full(self) -> None: + lib = LibraryInfo( + id="matplotlib", + name="Matplotlib", + version="3.10.0", + documentation_url="https://matplotlib.org", + description="A comprehensive plotting library", + ) + assert lib.version == "3.10.0" + + +class TestStatsResponse: + """Tests for StatsResponse schema.""" + + def test_creation(self) -> None: + stats = StatsResponse(specs=100, plots=500, libraries=9) + assert stats.specs == 100 + assert 
stats.plots == 500 + assert stats.libraries == 9 + + def test_serialization(self) -> None: + stats = StatsResponse(specs=10, plots=50, libraries=9) + data = stats.model_dump() + assert data == {"specs": 10, "plots": 50, "libraries": 9} diff --git a/tests/unit/api/test_seo_helpers.py b/tests/unit/api/test_seo_helpers.py new file mode 100644 index 0000000000..ab1cd6b2b2 --- /dev/null +++ b/tests/unit/api/test_seo_helpers.py @@ -0,0 +1,148 @@ +""" +Tests for SEO helper functions. + +Directly tests the pure helper functions in api/routers/seo.py. +""" + +from datetime import datetime +from unittest.mock import MagicMock + +import pytest + +from api.routers.seo import BOT_HTML_TEMPLATE, _build_sitemap_xml, _lastmod + + +class TestLastmod: + """Tests for _lastmod helper.""" + + def test_with_datetime(self) -> None: + dt = datetime(2025, 3, 15) + result = _lastmod(dt) + assert result == "2025-03-15" + + def test_with_none(self) -> None: + assert _lastmod(None) == "" + + def test_with_different_date(self) -> None: + dt = datetime(2024, 12, 1, 10, 30, 0) + result = _lastmod(dt) + assert result == "2024-12-01" + + +class TestBuildSitemapXml: + """Tests for _build_sitemap_xml.""" + + def test_empty_specs(self) -> None: + result = _build_sitemap_xml([]) + assert '" in result + + def test_spec_with_impls(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.updated = datetime(2025, 3, 15) + + spec = MagicMock() + spec.id = "scatter-basic" + spec.impls = [impl] + spec.updated = datetime(2025, 3, 14) + + result = _build_sitemap_xml([spec]) + assert "https://pyplots.ai/scatter-basic" in result + assert "https://pyplots.ai/scatter-basic/matplotlib" in result + assert "2025-03-14" in result + assert "2025-03-15" in result + + def test_spec_without_impls_excluded(self) -> None: + spec = MagicMock() + spec.id = "no-impls" + spec.impls = [] + + result = _build_sitemap_xml([spec]) + assert "no-impls" not in result + + def test_multiple_specs(self) -> None: + impl1 
= MagicMock() + impl1.library_id = "matplotlib" + impl1.updated = None + + spec1 = MagicMock() + spec1.id = "scatter-basic" + spec1.impls = [impl1] + spec1.updated = None + + impl2 = MagicMock() + impl2.library_id = "seaborn" + impl2.updated = None + + spec2 = MagicMock() + spec2.id = "bar-grouped" + spec2.impls = [impl2] + spec2.updated = None + + result = _build_sitemap_xml([spec1, spec2]) + assert "scatter-basic" in result + assert "bar-grouped" in result + assert "scatter-basic/matplotlib" in result + assert "bar-grouped/seaborn" in result + + def test_html_escaping(self) -> None: + """Spec IDs with special characters should be escaped.""" + impl = MagicMock() + impl.library_id = "matplotlib" + impl.updated = None + + spec = MagicMock() + spec.id = "test&spec" + spec.impls = [impl] + spec.updated = None + + result = _build_sitemap_xml([spec]) + assert "test&spec" in result + + def test_spec_with_none_updated(self) -> None: + impl = MagicMock() + impl.library_id = "matplotlib" + impl.updated = None + + spec = MagicMock() + spec.id = "scatter-basic" + spec.impls = [impl] + spec.updated = None + + result = _build_sitemap_xml([spec]) + # Should not have lastmod when updated is None + assert "scatter-basic" in result + + +class TestBotHtmlTemplate: + """Tests for the BOT_HTML_TEMPLATE.""" + + def test_template_has_required_meta_tags(self) -> None: + result = BOT_HTML_TEMPLATE.format( + title="Test Title", + description="Test Description", + image="https://example.com/image.png", + url="https://example.com", + ) + assert "og:title" in result + assert "og:description" in result + assert "og:image" in result + assert "og:url" in result + assert "twitter:card" in result + assert "summary_large_image" in result + assert "Test Title" in result + assert "Test Description" in result + + def test_template_has_canonical(self) -> None: + result = BOT_HTML_TEMPLATE.format( + title="t", description="d", image="i", url="https://pyplots.ai/" + ) + assert 'rel="canonical"' in 
result + assert "https://pyplots.ai/" in result diff --git a/tests/unit/automation/scripts/test_sync_helpers.py b/tests/unit/automation/scripts/test_sync_helpers.py new file mode 100644 index 0000000000..f55538ea99 --- /dev/null +++ b/tests/unit/automation/scripts/test_sync_helpers.py @@ -0,0 +1,119 @@ +""" +Tests for sync_to_postgres helper functions that aren't covered by existing tests. + +Focuses on _validate_quality_score, _parse_markdown_section, and _validate_spec_id. +""" + +import pytest + +from automation.scripts.sync_to_postgres import ( + _parse_markdown_section, + _validate_quality_score, + _validate_spec_id, +) + + +class TestValidateQualityScore: + """Tests for _validate_quality_score.""" + + def test_none(self) -> None: + assert _validate_quality_score(None) is None + + def test_valid_float(self) -> None: + assert _validate_quality_score(92.5) == 92.5 + + def test_valid_int(self) -> None: + assert _validate_quality_score(90) == 90.0 + + def test_zero(self) -> None: + assert _validate_quality_score(0) == 0.0 + + def test_hundred(self) -> None: + assert _validate_quality_score(100) == 100.0 + + def test_string_number(self) -> None: + assert _validate_quality_score("85.5") == 85.5 + + def test_out_of_range_high(self) -> None: + assert _validate_quality_score(101) is None + + def test_out_of_range_negative(self) -> None: + assert _validate_quality_score(-1) is None + + def test_invalid_string(self) -> None: + assert _validate_quality_score("not-a-number") is None + + def test_invalid_type(self) -> None: + assert _validate_quality_score([1, 2, 3]) is None + + +class TestValidateSpecId: + """Tests for _validate_spec_id.""" + + def test_valid_simple(self) -> None: + assert _validate_spec_id("scatter") is True + + def test_valid_with_hyphens(self) -> None: + assert _validate_spec_id("scatter-basic") is True + + def test_valid_multi_hyphen(self) -> None: + assert _validate_spec_id("scatter-regression-linear") is True + + def test_valid_with_numbers(self) -> 
None: + assert _validate_spec_id("bar-3d-categorical") is True + + def test_invalid_uppercase(self) -> None: + assert _validate_spec_id("Scatter-Basic") is False + + def test_invalid_underscore(self) -> None: + assert _validate_spec_id("scatter_basic") is False + + def test_invalid_spaces(self) -> None: + assert _validate_spec_id("scatter basic") is False + + def test_invalid_starts_with_hyphen(self) -> None: + assert _validate_spec_id("-scatter") is False + + def test_invalid_ends_with_hyphen(self) -> None: + assert _validate_spec_id("scatter-") is False + + def test_empty_string(self) -> None: + assert _validate_spec_id("") is False + + def test_invalid_double_hyphen(self) -> None: + assert _validate_spec_id("scatter--basic") is False + + +class TestParseMarkdownSection: + """Tests for _parse_markdown_section.""" + + def test_parse_text_section(self) -> None: + content = "## Description\nThis is a scatter plot.\n## Applications\n- Data viz\n" + result = _parse_markdown_section(content, "Description") + assert result == "This is a scatter plot." + + def test_parse_bullet_section(self) -> None: + content = "## Applications\n- Data visualization\n- Correlation analysis\n## Notes\n" + result = _parse_markdown_section(content, "Applications", as_bullets=True) + assert result == ["Data visualization", "Correlation analysis"] + + def test_missing_section_text(self) -> None: + content = "## Other\nSome content\n" + result = _parse_markdown_section(content, "Description") + assert result == "" + + def test_missing_section_bullets(self) -> None: + content = "## Other\nSome content\n" + result = _parse_markdown_section(content, "Applications", as_bullets=True) + assert result == [] + + def test_section_at_end_of_file(self) -> None: + content = "## Description\nLast section content." + result = _parse_markdown_section(content, "Description") + assert result == "Last section content." 
+ + def test_multiline_text_section(self) -> None: + content = "## Description\nLine 1\nLine 2\nLine 3\n## Next\n" + result = _parse_markdown_section(content, "Description") + assert "Line 1" in result + assert "Line 3" in result diff --git a/tests/unit/core/database/test_models.py b/tests/unit/core/database/test_models.py new file mode 100644 index 0000000000..5098b407cf --- /dev/null +++ b/tests/unit/core/database/test_models.py @@ -0,0 +1,155 @@ +""" +Tests for database ORM models. + +Tests model instantiation, defaults, and constraints. +""" + +import pytest +from sqlalchemy.ext.asyncio import AsyncSession + +from core.database.models import ( + Impl, + Library, + MAX_LIBRARY_ID_LENGTH, + MAX_SPEC_ID_LENGTH, + REVIEW_VERDICTS, + Spec, +) + + +class TestModelConstants: + """Tests for model constants.""" + + def test_max_spec_id_length(self) -> None: + assert MAX_SPEC_ID_LENGTH == 100 + + def test_max_library_id_length(self) -> None: + assert MAX_LIBRARY_ID_LENGTH == 50 + + def test_review_verdicts(self) -> None: + assert "APPROVED" in REVIEW_VERDICTS + assert "REJECTED" in REVIEW_VERDICTS + assert len(REVIEW_VERDICTS) == 2 + + +class TestSpecModel: + """Tests for Spec model.""" + + def test_create_minimal(self) -> None: + spec = Spec(id="scatter-basic", title="Basic Scatter") + assert spec.id == "scatter-basic" + assert spec.title == "Basic Scatter" + assert spec.description is None + assert spec.issue is None + + def test_create_with_fields(self) -> None: + spec = Spec( + id="scatter-basic", + title="Basic Scatter", + description="A scatter plot", + applications=["data viz"], + data=["numeric"], + notes=["Use for 2D"], + tags={"plot_type": ["scatter"]}, + issue=42, + suggested="user1", + ) + assert spec.description == "A scatter plot" + assert spec.applications == ["data viz"] + assert spec.tags == {"plot_type": ["scatter"]} + assert spec.issue == 42 + + @pytest.mark.asyncio + async def test_persist_and_retrieve(self, test_session: AsyncSession) -> None: + spec 
= Spec(id="test-spec", title="Test Spec", description="test") + test_session.add(spec) + await test_session.commit() + + from sqlalchemy import select + result = await test_session.execute(select(Spec).where(Spec.id == "test-spec")) + retrieved = result.scalar_one() + assert retrieved.title == "Test Spec" + + +class TestLibraryModel: + """Tests for Library model.""" + + def test_create_minimal(self) -> None: + lib = Library(id="matplotlib", name="Matplotlib") + assert lib.id == "matplotlib" + assert lib.version is None + + def test_create_with_fields(self) -> None: + lib = Library( + id="matplotlib", + name="Matplotlib", + version="3.10.0", + documentation_url="https://matplotlib.org", + description="Plotting library", + ) + assert lib.version == "3.10.0" + assert lib.documentation_url == "https://matplotlib.org" + + +class TestImplModel: + """Tests for Impl model.""" + + def test_create_minimal(self) -> None: + impl = Impl(spec_id="scatter-basic", library_id="matplotlib") + assert impl.spec_id == "scatter-basic" + assert impl.library_id == "matplotlib" + assert impl.code is None + assert impl.quality_score is None + assert impl.review_verdict is None + + def test_create_with_review_fields(self) -> None: + impl = Impl( + spec_id="scatter-basic", + library_id="matplotlib", + quality_score=92.5, + review_strengths=["clean code"], + review_weaknesses=["needs labels"], + review_verdict="APPROVED", + impl_tags={"techniques": ["annotations"]}, + ) + assert impl.quality_score == 92.5 + assert impl.review_verdict == "APPROVED" + assert impl.impl_tags == {"techniques": ["annotations"]} + + @pytest.mark.asyncio + async def test_default_id_generated(self, test_session: AsyncSession) -> None: + """IDs are auto-generated on insert.""" + lib = Library(id="matplotlib", name="Matplotlib") + spec1 = Spec(id="s1", title="Spec 1") + spec2 = Spec(id="s2", title="Spec 2") + test_session.add_all([lib, spec1, spec2]) + await test_session.commit() + + impl1 = Impl(spec_id="s1", 
library_id="matplotlib") + impl2 = Impl(spec_id="s2", library_id="matplotlib") + test_session.add_all([impl1, impl2]) + await test_session.commit() + assert impl1.id is not None + assert impl2.id is not None + assert impl1.id != impl2.id + + @pytest.mark.asyncio + async def test_persist_with_foreign_keys(self, test_session: AsyncSession) -> None: + lib = Library(id="matplotlib", name="Matplotlib") + spec = Spec(id="scatter-basic", title="Basic Scatter") + test_session.add_all([lib, spec]) + await test_session.commit() + + impl = Impl( + spec_id="scatter-basic", + library_id="matplotlib", + code="import matplotlib", + quality_score=90.0, + ) + test_session.add(impl) + await test_session.commit() + + from sqlalchemy import select + result = await test_session.execute(select(Impl).where(Impl.spec_id == "scatter-basic")) + retrieved = result.scalar_one() + assert retrieved.quality_score == 90.0 diff --git a/tests/unit/core/database/test_repositories.py b/tests/unit/core/database/test_repositories.py new file mode 100644 index 0000000000..de386f3b42 --- /dev/null +++ b/tests/unit/core/database/test_repositories.py @@ -0,0 +1,275 @@ +""" +Tests for database repository classes. + +Uses in-memory SQLite to test repository operations. 
+""" + +import pytest +from sqlalchemy.ext.asyncio import AsyncSession + +from core.database.models import Impl, Library, Spec +from core.database.repositories import ( + IMPL_UPDATABLE_FIELDS, + LIBRARY_UPDATABLE_FIELDS, + SPEC_UPDATABLE_FIELDS, + BaseRepository, + ImplRepository, + LibraryRepository, + SpecRepository, +) + + +# ===== Field Validation Constants ===== + + +class TestUpdatableFieldConstants: + """Tests for the updatable field sets.""" + + def test_spec_fields_includes_core(self) -> None: + assert "title" in SPEC_UPDATABLE_FIELDS + assert "description" in SPEC_UPDATABLE_FIELDS + assert "tags" in SPEC_UPDATABLE_FIELDS + + def test_spec_fields_excludes_id(self) -> None: + assert "id" not in SPEC_UPDATABLE_FIELDS + + def test_library_fields(self) -> None: + assert "name" in LIBRARY_UPDATABLE_FIELDS + assert "version" in LIBRARY_UPDATABLE_FIELDS + assert "id" not in LIBRARY_UPDATABLE_FIELDS + + def test_impl_fields_includes_review(self) -> None: + assert "quality_score" in IMPL_UPDATABLE_FIELDS + assert "review_strengths" in IMPL_UPDATABLE_FIELDS + assert "review_weaknesses" in IMPL_UPDATABLE_FIELDS + assert "review_verdict" in IMPL_UPDATABLE_FIELDS + assert "impl_tags" in IMPL_UPDATABLE_FIELDS + + def test_impl_fields_excludes_keys(self) -> None: + assert "id" not in IMPL_UPDATABLE_FIELDS + assert "spec_id" not in IMPL_UPDATABLE_FIELDS + assert "library_id" not in IMPL_UPDATABLE_FIELDS + + +# ===== Repository Tests with In-Memory SQLite ===== + + +class TestSpecRepository: + """Tests for SpecRepository.""" + + @pytest.mark.asyncio + async def test_get_all_empty(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + specs = await repo.get_all() + assert specs == [] + + @pytest.mark.asyncio + async def test_create_and_get(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + spec = await repo.create({"id": "scatter-basic", "title": "Basic Scatter"}) + assert spec.id == "scatter-basic" + assert 
spec.title == "Basic Scatter" + + retrieved = await repo.get_by_id("scatter-basic") + assert retrieved is not None + assert retrieved.title == "Basic Scatter" + + @pytest.mark.asyncio + async def test_get_by_id_not_found(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + result = await repo.get_by_id("nonexistent") + assert result is None + + @pytest.mark.asyncio + async def test_get_ids(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + await repo.create({"id": "bar-basic", "title": "Basic Bar"}) + await repo.create({"id": "scatter-basic", "title": "Basic Scatter"}) + + ids = await repo.get_ids() + assert ids == ["bar-basic", "scatter-basic"] + + @pytest.mark.asyncio + async def test_upsert_creates(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + spec = await repo.upsert({"id": "new-spec", "title": "New Spec"}) + assert spec.id == "new-spec" + assert spec.title == "New Spec" + + @pytest.mark.asyncio + async def test_upsert_updates(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + await repo.create({"id": "existing", "title": "Original"}) + spec = await repo.upsert({"id": "existing", "title": "Updated"}) + assert spec.title == "Updated" + + @pytest.mark.asyncio + async def test_upsert_without_id_raises(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + with pytest.raises(ValueError, match="must include 'id'"): + await repo.upsert({"title": "No ID"}) + + @pytest.mark.asyncio + async def test_update(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + await repo.create({"id": "s1", "title": "Original"}) + updated = await repo.update("s1", {"title": "Updated", "description": "New desc"}) + assert updated is not None + assert updated.title == "Updated" + assert updated.description == "New desc" + + @pytest.mark.asyncio + async def test_update_nonexistent(self, test_session: 
AsyncSession) -> None: + repo = SpecRepository(test_session) + result = await repo.update("nonexistent", {"title": "X"}) + assert result is None + + @pytest.mark.asyncio + async def test_update_ignores_non_updatable(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + await repo.create({"id": "s1", "title": "Original"}) + updated = await repo.update("s1", {"id": "new-id", "title": "Updated"}) + assert updated.id == "s1" # ID should not change + assert updated.title == "Updated" + + @pytest.mark.asyncio + async def test_delete(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + await repo.create({"id": "s1", "title": "To Delete"}) + assert await repo.delete("s1") is True + assert await repo.get_by_id("s1") is None + + @pytest.mark.asyncio + async def test_delete_nonexistent(self, test_session: AsyncSession) -> None: + repo = SpecRepository(test_session) + assert await repo.delete("nonexistent") is False + + +class TestLibraryRepository: + """Tests for LibraryRepository.""" + + @pytest.mark.asyncio + async def test_create_and_get_all(self, test_session: AsyncSession) -> None: + repo = LibraryRepository(test_session) + await repo.create({"id": "matplotlib", "name": "Matplotlib"}) + await repo.create({"id": "seaborn", "name": "Seaborn"}) + + libs = await repo.get_all() + assert len(libs) == 2 + # Should be ordered by name + assert libs[0].name == "Matplotlib" + assert libs[1].name == "Seaborn" + + @pytest.mark.asyncio + async def test_upsert_creates(self, test_session: AsyncSession) -> None: + repo = LibraryRepository(test_session) + lib = await repo.upsert({"id": "bokeh", "name": "Bokeh", "version": "3.0"}) + assert lib.id == "bokeh" + assert lib.version == "3.0" + + @pytest.mark.asyncio + async def test_upsert_updates(self, test_session: AsyncSession) -> None: + repo = LibraryRepository(test_session) + await repo.create({"id": "matplotlib", "name": "Matplotlib", "version": "3.9"}) + lib = await 
repo.upsert({"id": "matplotlib", "name": "Matplotlib", "version": "3.10"}) + assert lib.version == "3.10" + + @pytest.mark.asyncio + async def test_upsert_without_id_raises(self, test_session: AsyncSession) -> None: + repo = LibraryRepository(test_session) + with pytest.raises(ValueError, match="must include 'id'"): + await repo.upsert({"name": "No ID"}) + + +class TestImplRepository: + """Tests for ImplRepository.""" + + @pytest.fixture + async def setup_data(self, test_session: AsyncSession): + """Set up test data for impl repository tests.""" + lib = Library(id="matplotlib", name="Matplotlib") + test_session.add(lib) + spec = Spec(id="scatter-basic", title="Basic Scatter") + test_session.add(spec) + await test_session.commit() + return test_session + + @pytest.mark.asyncio + async def test_upsert_creates(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + impl = await repo.upsert("scatter-basic", "matplotlib", {"code": "print(1)", "quality_score": 90.0}) + assert impl.spec_id == "scatter-basic" + assert impl.library_id == "matplotlib" + assert impl.quality_score == 90.0 + + @pytest.mark.asyncio + async def test_upsert_updates(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "v1", "quality_score": 80.0}) + impl = await repo.upsert("scatter-basic", "matplotlib", {"code": "v2", "quality_score": 95.0}) + assert impl.quality_score == 95.0 + + @pytest.mark.asyncio + async def test_get_by_spec(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "test"}) + impls = await repo.get_by_spec("scatter-basic") + assert len(impls) == 1 + assert impls[0].library_id == "matplotlib" + + @pytest.mark.asyncio + async def test_get_by_library(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "test"}) + 
impls = await repo.get_by_library("matplotlib") + assert len(impls) == 1 + assert impls[0].spec_id == "scatter-basic" + + @pytest.mark.asyncio + async def test_get_code(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "import matplotlib"}) + impl = await repo.get_code("scatter-basic", "matplotlib") + assert impl is not None + assert impl.code == "import matplotlib" + + @pytest.mark.asyncio + async def test_get_code_not_found(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + impl = await repo.get_code("nonexistent", "matplotlib") + assert impl is None + + @pytest.mark.asyncio + async def test_get_by_spec_and_library(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "test", "review_verdict": "APPROVED"}) + impl = await repo.get_by_spec_and_library("scatter-basic", "matplotlib") + assert impl is not None + assert impl.review_verdict == "APPROVED" + + @pytest.mark.asyncio + async def test_get_by_spec_and_library_not_found(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + impl = await repo.get_by_spec_and_library("nonexistent", "matplotlib") + assert impl is None + + @pytest.mark.asyncio + async def test_get_total_code_lines(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", {"code": "line1\nline2\nline3"}) + total = await repo.get_total_code_lines() + assert total == 3 + + @pytest.mark.asyncio + async def test_get_total_code_lines_empty(self, test_session: AsyncSession) -> None: + repo = ImplRepository(test_session) + total = await repo.get_total_code_lines() + assert total == 0 + + @pytest.mark.asyncio + async def test_get_loc_per_impl(self, setup_data: AsyncSession) -> None: + repo = ImplRepository(setup_data) + await repo.upsert("scatter-basic", "matplotlib", 
{"code": "line1\nline2"}) + loc = await repo.get_loc_per_impl() + assert len(loc) == 1 + assert loc[0] == ("matplotlib", 2) diff --git a/tests/unit/core/test_config_resolve.py b/tests/unit/core/test_config_resolve.py new file mode 100644 index 0000000000..ac3e8c4e85 --- /dev/null +++ b/tests/unit/core/test_config_resolve.py @@ -0,0 +1,73 @@ +""" +Tests for Settings.resolve_model method. + +Tests model tier resolution for different CLIs. +""" + +from unittest.mock import patch + +import pytest + +from core.config import Settings + + +class TestResolveModel: + """Tests for Settings.resolve_model.""" + + @pytest.fixture + def settings(self) -> Settings: + """Create a Settings instance with default model mappings.""" + with patch.dict("os.environ", {}, clear=False): + return Settings( + cli_model_claude_small="claude-haiku", + cli_model_claude_medium="claude-sonnet", + cli_model_claude_large="claude-opus", + cli_model_copilot_small="gpt-4o-mini", + cli_model_copilot_medium="gpt-4o", + cli_model_copilot_large="o1", + cli_model_gemini_small="gemini-flash", + cli_model_gemini_medium="gemini-pro", + cli_model_gemini_large="gemini-ultra", + ) + + def test_claude_small(self, settings: Settings) -> None: + assert settings.resolve_model("claude", "small") == "claude-haiku" + + def test_claude_medium(self, settings: Settings) -> None: + assert settings.resolve_model("claude", "medium") == "claude-sonnet" + + def test_claude_large(self, settings: Settings) -> None: + assert settings.resolve_model("claude", "large") == "claude-opus" + + def test_copilot_small(self, settings: Settings) -> None: + assert settings.resolve_model("copilot", "small") == "gpt-4o-mini" + + def test_copilot_large(self, settings: Settings) -> None: + assert settings.resolve_model("copilot", "large") == "o1" + + def test_gemini_medium(self, settings: Settings) -> None: + assert settings.resolve_model("gemini", "medium") == "gemini-pro" + + def test_unknown_cli_returns_tier(self) -> None: + """Unknown CLI 
should return the tier unchanged (pass-through).""" + with patch.dict("os.environ", {}, clear=False): + s = Settings() + assert s.resolve_model("unknown-cli", "small") == "small" + + def test_unknown_tier_returns_tier(self) -> None: + """Unknown tier should return the tier unchanged (pass-through).""" + with patch.dict("os.environ", {}, clear=False): + s = Settings() + assert s.resolve_model("claude", "xlarge") == "xlarge" + + def test_unknown_cli_and_tier(self) -> None: + with patch.dict("os.environ", {}, clear=False): + s = Settings() + assert s.resolve_model("unknown", "unknown") == "unknown" + + def test_exact_model_name_passthrough(self) -> None: + """When tier is an exact model name, it's returned as-is.""" + with patch.dict("os.environ", {}, clear=False): + s = Settings() + result = s.resolve_model("claude", "claude-3-opus-20240229") + assert result == "claude-3-opus-20240229" From b1f5ffb4ea05f6325f6f8eb23d94da79bfb84b09 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 21:00:13 +0000 Subject: [PATCH 2/5] improve frontend test coverage from 33% to 71% Add comprehensive tests for all previously untested components and pages: Components: - SpecTabs.test.tsx: 22 tests (tabs, toggle, copy, quality, tags) - SpecDetailView.test.tsx: 13 tests (zoom, actions, overlay, counter) - SpecOverview.test.tsx: 13 tests (grid, sorting, cards, tooltips) - FilterBar.test.tsx: 5 tests (chips, counter, filter groups) - Layout.test.tsx: 4 tests (provider, context, fetch) - CodeHighlighter.test.tsx: 3 tests (render, language) Pages: - StatsPage.test.tsx: 9 tests (dashboard, stats, loading, error) - CatalogPage.test.tsx: 8 tests (specs, grouping, loading) - SpecPage.test.tsx: 10 tests (overview/detail modes, 404, fetch) - DebugPage.test.tsx: 4 tests (debug data, loading, error) - HomePage.test.tsx: 4 tests (components, POTD, grid) - InteractivePage.test.tsx: 4 tests (iframe, loading, error) Utils/Hooks: - filters-extended.test.ts: 17 tests (getAvailableValues, search) - 
useFilterState-extended.test.ts: 7 tests (isFiltersEmpty) https://claude.ai/code/session_01KhAhJKpEoqCzmWzcALSfW6 --- app/src/components/CodeHighlighter.test.tsx | 54 ++ app/src/components/FilterBar.test.tsx | 84 ++++ app/src/components/Layout.test.tsx | 130 +++++ app/src/components/SpecDetailView.test.tsx | 164 ++++++ app/src/components/SpecOverview.test.tsx | 181 +++++++ app/src/components/SpecTabs.test.tsx | 466 ++++++++++++++++++ app/src/hooks/useFilterState-extended.test.ts | 71 +++ app/src/pages/CatalogPage.test.tsx | 139 ++++++ app/src/pages/DebugPage.test.tsx | 92 ++++ app/src/pages/HomePage.test.tsx | 91 ++++ app/src/pages/InteractivePage.test.tsx | 96 ++++ app/src/pages/SpecPage.test.tsx | 200 ++++++++ app/src/pages/StatsPage.test.tsx | 206 ++++++++ app/src/utils/filters-extended.test.ts | 193 ++++++++ 14 files changed, 2167 insertions(+) create mode 100644 app/src/components/CodeHighlighter.test.tsx create mode 100644 app/src/components/FilterBar.test.tsx create mode 100644 app/src/components/Layout.test.tsx create mode 100644 app/src/components/SpecDetailView.test.tsx create mode 100644 app/src/components/SpecOverview.test.tsx create mode 100644 app/src/components/SpecTabs.test.tsx create mode 100644 app/src/hooks/useFilterState-extended.test.ts create mode 100644 app/src/pages/CatalogPage.test.tsx create mode 100644 app/src/pages/DebugPage.test.tsx create mode 100644 app/src/pages/HomePage.test.tsx create mode 100644 app/src/pages/InteractivePage.test.tsx create mode 100644 app/src/pages/SpecPage.test.tsx create mode 100644 app/src/pages/StatsPage.test.tsx create mode 100644 app/src/utils/filters-extended.test.ts diff --git a/app/src/components/CodeHighlighter.test.tsx b/app/src/components/CodeHighlighter.test.tsx new file mode 100644 index 0000000000..39f27bae95 --- /dev/null +++ b/app/src/components/CodeHighlighter.test.tsx @@ -0,0 +1,54 @@ +import { describe, it, expect, vi } from 'vitest'; +import { render, screen } from '../test-utils'; + 
+vi.mock('react-syntax-highlighter/dist/esm/prism-light', () => { + const MockHighlighter = ({ + children, + language, + ...props + }: { + children: string; + language: string; + style?: object; + customStyle?: object; + }) => ( +
<pre data-testid="syntax-highlighter" data-language={language} {...props}>
+      {children}
+    </pre>
+ ); + MockHighlighter.registerLanguage = vi.fn(); + return { default: MockHighlighter }; +}); + +vi.mock('react-syntax-highlighter/dist/esm/styles/prism', () => ({ + oneLight: {}, +})); + +vi.mock('react-syntax-highlighter/dist/esm/languages/prism/python', () => ({ + default: {}, +})); + +import CodeHighlighter from './CodeHighlighter'; + +describe('CodeHighlighter', () => { + it('renders without crashing', () => { + render(); + expect(screen.getByTestId('syntax-highlighter')).toBeInTheDocument(); + }); + + it('renders the provided code text', () => { + const code = 'import matplotlib.pyplot as plt\nplt.show()'; + render(); + const highlighter = screen.getByTestId('syntax-highlighter'); + expect(highlighter).toHaveTextContent('import matplotlib.pyplot as plt'); + expect(highlighter).toHaveTextContent('plt.show()'); + }); + + it('sets language to python', () => { + render(); + expect(screen.getByTestId('syntax-highlighter')).toHaveAttribute( + 'data-language', + 'python' + ); + }); +}); diff --git a/app/src/components/FilterBar.test.tsx b/app/src/components/FilterBar.test.tsx new file mode 100644 index 0000000000..1cfbb89105 --- /dev/null +++ b/app/src/components/FilterBar.test.tsx @@ -0,0 +1,84 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen } from '../test-utils'; + +// Mock the utils module +vi.mock('../utils', () => ({ + getAvailableValues: vi.fn(() => [['scatter', 10], ['bar', 5]]), + getAvailableValuesForGroup: vi.fn(() => [['scatter', 15]]), + getSearchResults: vi.fn(() => []), +})); + +import { FilterBar } from './FilterBar'; + +// ResizeObserver polyfill +class MockResizeObserver { + observe = vi.fn(); + unobserve = vi.fn(); + disconnect = vi.fn(); +} + +const defaultProps = { + activeFilters: [] as { category: 'lib'; values: string[] }[], + filterCounts: { + lib: { matplotlib: 100, seaborn: 80 }, + spec: {}, plot: {}, data: {}, dom: {}, feat: {}, + dep: {}, tech: {}, pat: {}, prep: {}, style: {}, + }, + 
orCounts: [] as Record[], + specTitles: {}, + currentTotal: 100, + displayedCount: 20, + randomAnimation: null, + imageSize: 'normal' as const, + onImageSizeChange: vi.fn(), + onAddFilter: vi.fn(), + onAddValueToGroup: vi.fn(), + onRemoveFilter: vi.fn(), + onRemoveGroup: vi.fn(), + onTrackEvent: vi.fn(), +}; + +describe('FilterBar', () => { + beforeEach(() => { + vi.clearAllMocks(); + vi.stubGlobal('ResizeObserver', MockResizeObserver); + }); + + it('renders without crashing', () => { + render(); + // Component should mount and have an input + expect(document.querySelector('input')).toBeTruthy(); + }); + + it('renders active filter chip with category:value format', () => { + const filters = [ + { category: 'lib' as const, values: ['matplotlib'] }, + ]; + render(); + // Chip label is "category:value" format + expect(screen.getByText('lib:matplotlib')).toBeInTheDocument(); + }); + + it('shows counter text with total', () => { + render(); + expect(screen.getByText(/42/)).toBeInTheDocument(); + }); + + it('renders chip for each filter group', () => { + const filters = [ + { category: 'lib' as const, values: ['matplotlib'] }, + { category: 'plot' as const, values: ['scatter'] }, + ]; + render(); + const chips = document.querySelectorAll('.MuiChip-root'); + expect(chips.length).toBeGreaterThanOrEqual(2); + }); + + it('renders comma-separated values in chip', () => { + const filters = [ + { category: 'lib' as const, values: ['matplotlib', 'seaborn'] }, + ]; + render(); + expect(screen.getByText('lib:matplotlib,seaborn')).toBeInTheDocument(); + }); +}); diff --git a/app/src/components/Layout.test.tsx b/app/src/components/Layout.test.tsx new file mode 100644 index 0000000000..95ba4de2d9 --- /dev/null +++ b/app/src/components/Layout.test.tsx @@ -0,0 +1,130 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '@testing-library/react'; +import { MemoryRouter } from 'react-router-dom'; +import { ThemeProvider, createTheme } 
from '@mui/material/styles'; +import { AppDataProvider, Layout } from './Layout'; +import { AppDataContext } from '../hooks/useLayoutContext'; +import { useContext } from 'react'; + +vi.mock('react-helmet-async', () => ({ + Helmet: ({ children }: { children: React.ReactNode }) => <>{children}, +})); + +// jsdom does not have requestIdleCallback / cancelIdleCallback +vi.stubGlobal( + 'requestIdleCallback', + vi.fn((cb: IdleRequestCallback) => { + const id = setTimeout(() => cb({} as IdleDeadline), 0); + return id as unknown as number; + }), +); +vi.stubGlobal('cancelIdleCallback', vi.fn((id: number) => clearTimeout(id))); + +const theme = createTheme(); + +function wrap(ui: React.ReactElement) { + return render(ui, { + wrapper: ({ children }) => ( + + {children} + + ), + }); +} + +describe('Layout', () => { + it('renders children via Outlet', () => { + // Layout uses , which renders nothing without route context, + // but the wrapper itself renders without errors. + wrap(); + + // The main Box should be present + const main = document.querySelector('main'); + expect(main).toBeInTheDocument(); + }); +}); + +describe('AppDataProvider', () => { + beforeEach(() => { + vi.restoreAllMocks(); + // Re-stub after restoreAllMocks clears them + vi.stubGlobal( + 'requestIdleCallback', + vi.fn((cb: IdleRequestCallback) => { + const id = setTimeout(() => cb({} as IdleDeadline), 0); + return id as unknown as number; + }), + ); + vi.stubGlobal('cancelIdleCallback', vi.fn((id: number) => clearTimeout(id))); + }); + + it('provides context to children', async () => { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + json: () => Promise.resolve({ specs: [], libraries: [], specs_count: 0, plots_count: 0, libraries_count: 0 }), + }), + ); + + function Consumer() { + const ctx = useContext(AppDataContext); + return
{ctx ? 'has-context' : 'no-context'}
; + } + + wrap( + + + , + ); + + expect(screen.getByTestId('ctx')).toHaveTextContent('has-context'); + }); + + it('calls fetch for /specs, /libraries, and /stats', async () => { + const fetchMock = vi.fn().mockResolvedValue({ + ok: true, + json: () => Promise.resolve({}), + }); + vi.stubGlobal('fetch', fetchMock); + + wrap( + +
child
+
, + ); + + await waitFor(() => { + expect(fetchMock).toHaveBeenCalledTimes(3); + }); + + const urls = fetchMock.mock.calls.map((c: unknown[]) => c[0] as string); + expect(urls.some((u: string) => u.includes('/specs'))).toBe(true); + expect(urls.some((u: string) => u.includes('/libraries'))).toBe(true); + expect(urls.some((u: string) => u.includes('/stats'))).toBe(true); + }); + + it('handles fetch failure gracefully', async () => { + const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); + vi.stubGlobal( + 'fetch', + vi.fn().mockRejectedValue(new Error('Network error')), + ); + + wrap( + +
still renders
+
, + ); + + await waitFor(() => { + expect(consoleSpy).toHaveBeenCalledWith( + 'Initial data load incomplete:', + 'Network error', + ); + }); + + expect(screen.getByTestId('child')).toHaveTextContent('still renders'); + consoleSpy.mockRestore(); + }); +}); diff --git a/app/src/components/SpecDetailView.test.tsx b/app/src/components/SpecDetailView.test.tsx new file mode 100644 index 0000000000..e9b19a124f --- /dev/null +++ b/app/src/components/SpecDetailView.test.tsx @@ -0,0 +1,164 @@ +import { describe, it, expect, vi } from 'vitest'; +import { render, screen, userEvent } from '../test-utils'; +import { SpecDetailView } from './SpecDetailView'; +import type { Implementation } from '../types'; + +vi.mock('../utils/responsiveImage', () => ({ + buildDetailSrcSet: (url: string, fmt: string) => `${url}-srcset-${fmt}`, + DETAIL_SIZES: '100vw', +})); + +const makeImpl = (overrides: Partial = {}): Implementation => ({ + library_id: 'matplotlib', + library_name: 'Matplotlib', + preview_url: 'https://example.com/plot.png', + preview_html: null, + quality_score: 85, + code: 'import matplotlib\nprint("hello")', + ...overrides, +}); + +const implA = makeImpl({ library_id: 'altair', library_name: 'Altair' }); +const implB = makeImpl({ library_id: 'matplotlib', library_name: 'Matplotlib' }); +const implC = makeImpl({ library_id: 'plotly', library_name: 'Plotly', preview_html: '
interactive
' }); + +const defaultProps = { + specId: 'scatter-basic', + specTitle: 'Basic Scatter Plot', + selectedLibrary: 'matplotlib', + currentImpl: implB, + implementations: [implB, implA, implC], + imageLoaded: true, + codeCopied: null, + downloadDone: null, + onImageLoad: vi.fn(), + onCopyCode: vi.fn(), + onDownload: vi.fn(), + onTrackEvent: vi.fn(), +}; + +describe('SpecDetailView', () => { + it('renders image with correct alt text', () => { + render(); + const img = screen.getByRole('img'); + expect(img).toHaveAttribute('alt', 'Basic Scatter Plot - matplotlib'); + }); + + it('shows skeleton when image is not loaded', () => { + const { container } = render( + , + ); + // MUI Skeleton renders a span with class containing MuiSkeleton + const skeleton = container.querySelector('.MuiSkeleton-root'); + expect(skeleton).toBeInTheDocument(); + }); + + it('does not show skeleton when image is loaded', () => { + const { container } = render( + , + ); + const skeleton = container.querySelector('.MuiSkeleton-root'); + expect(skeleton).not.toBeInTheDocument(); + }); + + it('renders Copy Code button that calls onCopyCode', async () => { + const onCopyCode = vi.fn(); + const user = userEvent.setup(); + + render(); + + const btn = screen.getByRole('button', { name: /copy code/i }); + expect(btn).toBeInTheDocument(); + await user.click(btn); + expect(onCopyCode).toHaveBeenCalledWith(implB); + }); + + it('renders Download PNG button that calls onDownload', async () => { + const onDownload = vi.fn(); + const user = userEvent.setup(); + + render(); + + const btn = screen.getByRole('button', { name: /download png/i }); + expect(btn).toBeInTheDocument(); + await user.click(btn); + expect(onDownload).toHaveBeenCalledWith(implB); + }); + + it('shows Open Interactive button only when preview_html exists', () => { + // No preview_html on current impl + const { rerender } = render(); + expect(screen.queryByRole('button', { name: /open interactive/i })).not.toBeInTheDocument(); + + // With 
preview_html + rerender( + , + ); + // Open interactive is rendered as a link (IconButton with component={Link}) + expect(screen.getByRole('link', { name: /open interactive/i })).toBeInTheDocument(); + }); + + it('shows implementation counter with current/total', () => { + render(); + // Sorted alphabetically: altair(1), matplotlib(2), plotly(3) -> matplotlib = 2/3 + expect(screen.getByText('2/3')).toBeInTheDocument(); + }); + + it('does not show implementation counter when only one implementation', () => { + render( + , + ); + expect(screen.queryByText('1/1')).not.toBeInTheDocument(); + }); + + it('shows ">>> copied" overlay when codeCopied matches current library', () => { + render( + , + ); + expect(screen.getByText('>>> copied')).toBeInTheDocument(); + }); + + it('shows ">>> downloaded" overlay when downloadDone matches current library', () => { + render( + , + ); + expect(screen.getByText('>>> downloaded')).toBeInTheDocument(); + }); + + it('does not show overlay when codeCopied does not match current library', () => { + render( + , + ); + expect(screen.queryByText('>>> copied')).not.toBeInTheDocument(); + expect(screen.queryByText('>>> downloaded')).not.toBeInTheDocument(); + }); + + it('toggles zoom on click via aria-label change', async () => { + const user = userEvent.setup(); + render(); + + const zoomContainer = screen.getByRole('button', { name: 'Zoom in' }); + expect(zoomContainer).toBeInTheDocument(); + + await user.click(zoomContainer); + expect(screen.getByRole('button', { name: 'Zoom out' })).toBeInTheDocument(); + + await user.click(screen.getByRole('button', { name: 'Zoom out' })); + expect(screen.getByRole('button', { name: 'Zoom in' })).toBeInTheDocument(); + }); + + it('renders nothing special when currentImpl is null', () => { + render( + , + ); + expect(screen.queryByRole('img')).not.toBeInTheDocument(); + expect(screen.queryByRole('button', { name: /copy code/i })).not.toBeInTheDocument(); + }); +}); diff --git 
a/app/src/components/SpecOverview.test.tsx b/app/src/components/SpecOverview.test.tsx new file mode 100644 index 0000000000..2b4f72852c --- /dev/null +++ b/app/src/components/SpecOverview.test.tsx @@ -0,0 +1,181 @@ +import { describe, it, expect, vi } from 'vitest'; +import { render, screen, userEvent } from '../test-utils'; +import { SpecOverview } from './SpecOverview'; +import type { Implementation } from '../types'; + +vi.mock('../utils/responsiveImage', () => ({ + buildSrcSet: (url: string, fmt: string) => `${url}-srcset-${fmt}`, + OVERVIEW_SIZES: '33vw', +})); + +const makeImpl = (overrides: Partial = {}): Implementation => ({ + library_id: 'matplotlib', + library_name: 'Matplotlib', + preview_url: 'https://example.com/plot.png', + preview_html: undefined, + quality_score: 85, + code: 'print("hello")', + ...overrides, +}); + +const implA = makeImpl({ library_id: 'altair', library_name: 'Altair', quality_score: 72 }); +const implB = makeImpl({ library_id: 'matplotlib', library_name: 'Matplotlib', quality_score: 85 }); +const implC = makeImpl({ + library_id: 'plotly', + library_name: 'Plotly', + quality_score: 90, + preview_html: '
interactive
', +}); + +const defaultGetLibraryMeta = (id: string) => ({ + id, + name: id, + description: `${id} description`, + documentation_url: `https://${id}.org`, +}); + +const defaultProps = { + specId: 'scatter-basic', + specTitle: 'Basic Scatter Plot', + implementations: [implC, implA, implB], + codeCopied: null, + downloadDone: null, + openTooltip: null, + onImplClick: vi.fn(), + onCopyCode: vi.fn(), + onDownload: vi.fn(), + onTooltipToggle: vi.fn(), + getLibraryMeta: defaultGetLibraryMeta, + onTrackEvent: vi.fn(), +}; + +describe('SpecOverview', () => { + it('renders all implementation cards', () => { + render(); + const images = screen.getAllByRole('img'); + expect(images).toHaveLength(3); + }); + + it('cards are sorted alphabetically by library_id', () => { + render(); + const images = screen.getAllByRole('img'); + // Sorted: altair, matplotlib, plotly + expect(images[0]).toHaveAttribute('alt', 'Basic Scatter Plot - altair'); + expect(images[1]).toHaveAttribute('alt', 'Basic Scatter Plot - matplotlib'); + expect(images[2]).toHaveAttribute('alt', 'Basic Scatter Plot - plotly'); + }); + + it('shows quality score for each implementation', () => { + render(); + expect(screen.getByText('72')).toBeInTheDocument(); + expect(screen.getByText('85')).toBeInTheDocument(); + expect(screen.getByText('90')).toBeInTheDocument(); + }); + + it('does not show quality score when null', () => { + const implNoScore = makeImpl({ library_id: 'seaborn', quality_score: null }); + render( + , + ); + expect(screen.getByText('seaborn')).toBeInTheDocument(); + // No score rendered - only the library name text and no numeric text + const allText = screen.getByText('seaborn').closest('[class]')?.parentElement?.textContent; + expect(allText).not.toMatch(/\d+/); + }); + + it('shows library name below each card', () => { + render(); + expect(screen.getByText('altair')).toBeInTheDocument(); + expect(screen.getByText('matplotlib')).toBeInTheDocument(); + 
expect(screen.getByText('plotly')).toBeInTheDocument(); + }); + + it('calls onImplClick when a card is clicked', async () => { + const onImplClick = vi.fn(); + const user = userEvent.setup(); + + render(); + + // Click on the first image (altair, sorted) + const images = screen.getAllByRole('img'); + await user.click(images[0]); + expect(onImplClick).toHaveBeenCalledWith('altair'); + }); + + it('renders Copy Code button for each card that calls onCopyCode', async () => { + const onCopyCode = vi.fn(); + const user = userEvent.setup(); + + render(); + + const copyButtons = screen.getAllByRole('button', { name: /copy code/i }); + expect(copyButtons).toHaveLength(3); + + await user.click(copyButtons[0]); + // First sorted impl is altair + expect(onCopyCode).toHaveBeenCalledWith(implA); + }); + + it('renders Download PNG button for each card that calls onDownload', async () => { + const onDownload = vi.fn(); + const user = userEvent.setup(); + + render(); + + const downloadButtons = screen.getAllByRole('button', { name: /download png/i }); + expect(downloadButtons).toHaveLength(3); + + await user.click(downloadButtons[1]); + // Second sorted impl is matplotlib + expect(onDownload).toHaveBeenCalledWith(implB); + }); + + it('shows Open Interactive only for implementations with preview_html', () => { + render(); + // Only plotly has preview_html + const interactiveLinks = screen.getAllByRole('link', { name: /open interactive/i }); + expect(interactiveLinks).toHaveLength(1); + }); + + it('shows skeleton when an implementation has no preview_url', () => { + const implNoPreview = makeImpl({ + library_id: 'bokeh', + library_name: 'Bokeh', + preview_url: '' as unknown as string, + }); + // SpecOverview checks `impl.preview_url` as truthy -> falsy string renders skeleton + const { container } = render( + , + ); + const skeleton = container.querySelector('.MuiSkeleton-root'); + expect(skeleton).toBeInTheDocument(); + }); + + it('shows ">>> copied" overlay when codeCopied matches 
a library_id', () => { + render( + , + ); + expect(screen.getByText('>>> copied')).toBeInTheDocument(); + }); + + it('shows ">>> downloaded" overlay when downloadDone matches a library_id', () => { + render( + , + ); + expect(screen.getByText('>>> downloaded')).toBeInTheDocument(); + }); + + it('does not show overlay when codeCopied does not match any library_id', () => { + render( + , + ); + expect(screen.queryByText('>>> copied')).not.toBeInTheDocument(); + expect(screen.queryByText('>>> downloaded')).not.toBeInTheDocument(); + }); +}); diff --git a/app/src/components/SpecTabs.test.tsx b/app/src/components/SpecTabs.test.tsx new file mode 100644 index 0000000000..a4bebec38a --- /dev/null +++ b/app/src/components/SpecTabs.test.tsx @@ -0,0 +1,466 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor, userEvent } from '../test-utils'; +import { SpecTabs } from './SpecTabs'; + +// Mock the lazy-loaded CodeHighlighter +vi.mock('./CodeHighlighter', () => ({ + default: ({ code }: { code: string }) => ( +
{code}
+ ), +})); + +// Mock the constants module to avoid import.meta issues +vi.mock('../constants', () => ({ + API_URL: 'http://localhost:8000', +})); + +beforeEach(() => { + vi.restoreAllMocks(); + // Mock fetch globally - return tag counts + global.fetch = vi.fn().mockResolvedValue({ + ok: true, + json: () => + Promise.resolve({ + globalCounts: { + plot: { scatter: 42, line: 30 }, + data: { numeric: 20 }, + dep: { numpy: 50 }, + }, + }), + }); +}); + +const baseProps = { + code: 'import matplotlib\nprint("hello")', + specId: 'scatter-basic', + title: 'Basic Scatter Plot', + description: 'A scatter plot showing data points', + libraryId: 'matplotlib', + qualityScore: null as number | null, +}; + +describe('SpecTabs', () => { + // ------------------------------------------------------- + // 1. Rendering in default mode (all 4 tabs visible) + // ------------------------------------------------------- + it('renders all 4 tabs in default mode', () => { + render(); + expect(screen.getByRole('tab', { name: /code/i })).toBeInTheDocument(); + expect(screen.getByRole('tab', { name: /spec/i })).toBeInTheDocument(); + expect(screen.getByRole('tab', { name: /impl/i })).toBeInTheDocument(); + expect(screen.getByRole('tab', { name: /quality/i })).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 2. Rendering in overviewMode (only Spec tab) + // ------------------------------------------------------- + it('renders only the Spec tab in overviewMode', () => { + render(); + expect(screen.queryByRole('tab', { name: /code/i })).not.toBeInTheDocument(); + expect(screen.getByRole('tab', { name: /spec/i })).toBeInTheDocument(); + expect(screen.queryByRole('tab', { name: /impl/i })).not.toBeInTheDocument(); + expect(screen.queryByRole('tab', { name: /quality/i })).not.toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 3. 
Tab click opens content, second click collapses + // ------------------------------------------------------- + it('opens tab content on click and collapses on second click', async () => { + const user = userEvent.setup(); + const onTrackEvent = vi.fn(); + render(); + + const specTab = screen.getByRole('tab', { name: /spec/i }); + + // Click to open + await user.click(specTab); + expect(screen.getByText('Basic Scatter Plot')).toBeInTheDocument(); + expect(onTrackEvent).toHaveBeenCalledWith('tab_toggle', { + action: 'open', + tab: 'specification', + library: 'matplotlib', + }); + + // Click again to collapse + await user.click(specTab); + expect(onTrackEvent).toHaveBeenCalledWith('tab_toggle', { + action: 'close', + tab: 'specification', + library: 'matplotlib', + }); + }); + + // ------------------------------------------------------- + // 4. Specification tab content + // ------------------------------------------------------- + it('shows specification content: title, description, applications, data, notes', async () => { + const user = userEvent.setup(); + render( + , + ); + + await user.click(screen.getByRole('tab', { name: /spec/i })); + + // Title + expect(screen.getByText('Basic Scatter Plot')).toBeInTheDocument(); + + // Description heading and text + expect(screen.getByText('Description')).toBeInTheDocument(); + expect(screen.getByText('A scatter plot showing data points')).toBeInTheDocument(); + + // Applications + expect(screen.getByText('Applications')).toBeInTheDocument(); + expect(screen.getByText('Data analysis')).toBeInTheDocument(); + expect(screen.getByText('Statistical visualization')).toBeInTheDocument(); + + // Data + expect(screen.getByText('Data')).toBeInTheDocument(); + expect(screen.getByText('Random numeric data')).toBeInTheDocument(); + expect(screen.getByText('CSV files')).toBeInTheDocument(); + + // Notes + expect(screen.getByText('Notes')).toBeInTheDocument(); + expect(screen.getByText('Use for small datasets')).toBeInTheDocument(); + 
expect(screen.getByText('Works best with 2D data')).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 5. Implementation tab content + // ------------------------------------------------------- + it('shows implementation content: imageDescription, strengths, weaknesses', async () => { + const user = userEvent.setup(); + render( + , + ); + + await user.click(screen.getByRole('tab', { name: /impl/i })); + + expect(screen.getByText('A colorful scatter plot with labeled axes')).toBeInTheDocument(); + + expect(screen.getByText('Strengths')).toBeInTheDocument(); + expect(screen.getByText('Clear layout')).toBeInTheDocument(); + expect(screen.getByText('Good color choices')).toBeInTheDocument(); + + expect(screen.getByText('Weaknesses')).toBeInTheDocument(); + expect(screen.getByText('Missing legend')).toBeInTheDocument(); + expect(screen.getByText('Overlapping points')).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 6. Quality tab: score display + // ------------------------------------------------------- + it('shows quality score formatted as score/100', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /85/i })); + + expect(screen.getByText('85/100')).toBeInTheDocument(); + }); + + it('shows N/A when quality score is null', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /quality/i })); + + expect(screen.getByText('N/A')).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 7. 
Quality tab: criteria breakdown + // ------------------------------------------------------- + it('shows criteria breakdown with category scores', async () => { + const user = userEvent.setup(); + const criteriaChecklist = { + visual_quality: { + score: 18, + max: 20, + items: [ + { id: 'vq1', name: 'Color harmony', score: 9, max: 10, passed: true, comment: 'Great palette' }, + { id: 'vq2', name: 'Layout balance', score: 9, max: 10, passed: true }, + ], + }, + accuracy: { + score: 15, + max: 20, + items: [], + }, + }; + render(); + + await user.click(screen.getByRole('tab', { name: /92/i })); + + expect(screen.getByText('Breakdown')).toBeInTheDocument(); + expect(screen.getByText('visual quality')).toBeInTheDocument(); + expect(screen.getByText('18/20')).toBeInTheDocument(); + expect(screen.getByText('accuracy')).toBeInTheDocument(); + expect(screen.getByText('15/20')).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 8. Tags display + // ------------------------------------------------------- + it('displays spec tags and impl tags', () => { + render( + , + ); + + // Spec tags + expect(screen.getByText('scatter')).toBeInTheDocument(); + expect(screen.getByText('bubble')).toBeInTheDocument(); + expect(screen.getByText('numeric')).toBeInTheDocument(); + + // Impl tags + expect(screen.getByText('numpy')).toBeInTheDocument(); + expect(screen.getByText('vectorized')).toBeInTheDocument(); + + // Category labels + expect(screen.getByText('plot type:')).toBeInTheDocument(); + expect(screen.getByText('data type:')).toBeInTheDocument(); + expect(screen.getByText('dependencies:')).toBeInTheDocument(); + expect(screen.getByText('techniques:')).toBeInTheDocument(); + }); + + it('does not display impl tags in overviewMode', () => { + render( + , + ); + + expect(screen.getByText('scatter')).toBeInTheDocument(); + expect(screen.queryByText('numpy')).not.toBeInTheDocument(); + }); + + // 
------------------------------------------------------- + // 9. Copy code functionality + // ------------------------------------------------------- + it('copies code to clipboard and fires tracking event', async () => { + const user = userEvent.setup(); + const writeText = vi.fn().mockResolvedValue(undefined); + Object.defineProperty(navigator, 'clipboard', { + value: { writeText }, + writable: true, + configurable: true, + }); + + const onTrackEvent = vi.fn(); + render(); + + // Open Code tab + await user.click(screen.getByRole('tab', { name: /code/i })); + + // Click copy button + const copyButton = screen.getByRole('button', { name: /copy code/i }); + await user.click(copyButton); + + expect(writeText).toHaveBeenCalledWith('import matplotlib\nprint("hello")'); + expect(onTrackEvent).toHaveBeenCalledWith('copy_code', { + spec: 'scatter-basic', + library: 'matplotlib', + method: 'tab', + page: 'spec_detail', + }); + }); + + // ------------------------------------------------------- + // 10. "No quality data available" message + // ------------------------------------------------------- + it('shows "No quality data available." when no score and no checklist', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /quality/i })); + + expect(screen.getByText('No quality data available.')).toBeInTheDocument(); + }); + + it('does not show "No quality data" when score is present', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /75/i })); + + expect(screen.queryByText('No quality data available.')).not.toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 11. "No implementation review data available" message + // ------------------------------------------------------- + it('shows "No implementation review data available." 
when no impl data', async () => { + const user = userEvent.setup(); + render( + , + ); + + await user.click(screen.getByRole('tab', { name: /impl/i })); + + expect(screen.getByText('No implementation review data available.')).toBeInTheDocument(); + }); + + it('does not show "No implementation review data" when imageDescription is present', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /impl/i })); + + expect( + screen.queryByText('No implementation review data available.'), + ).not.toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 12. Quality tab label shows numeric score when present + // ------------------------------------------------------- + it('shows numeric score in the Quality tab label', () => { + render(); + expect(screen.getByRole('tab', { name: /93/i })).toBeInTheDocument(); + }); + + it('shows "Quality" in the tab label when score is null', () => { + render(); + expect(screen.getByRole('tab', { name: /quality/i })).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 13. Tag counts fetched and displayed + // ------------------------------------------------------- + it('renders tags and fetch mock is configured for tag counts', async () => { + // The module-level cache means fetch may or may not be called depending + // on test execution order. We verify the component renders tags correctly. + render( + , + ); + + // Tag chip is rendered + expect(screen.getByText('scatter')).toBeInTheDocument(); + + // Fetch is mocked for tag counts (may be called if cache is empty) + expect(global.fetch).toBeDefined(); + }); + + // ------------------------------------------------------- + // 14. 
Implementation tab metadata (specId, libraryId, date) + // ------------------------------------------------------- + it('shows metadata in the implementation tab', async () => { + const user = userEvent.setup(); + render( + , + ); + + await user.click(screen.getByRole('tab', { name: /impl/i })); + + // The metadata line contains specId, libraryId, and date together + expect( + screen.getByText(/scatter-basic · matplotlib · Jan 15, 2025/), + ).toBeInTheDocument(); + }); + + // ------------------------------------------------------- + // 15. Code tab shows code via CodeHighlighter + // ------------------------------------------------------- + it('renders CodeHighlighter with code when Code tab is open', async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole('tab', { name: /code/i })); + + await waitFor(() => { + expect(screen.getByTestId('code-highlighter')).toBeInTheDocument(); + }); + expect(screen.getByTestId('code-highlighter')).toHaveTextContent( + 'import matplotlib print("hello")', + ); + }); + + // ------------------------------------------------------- + // 16. Tag click handler navigates + // ------------------------------------------------------- + it('fires onTrackEvent and navigates on tag click', async () => { + const user = userEvent.setup(); + const onTrackEvent = vi.fn(); + // Mock window.location.href setter + const hrefSetter = vi.fn(); + Object.defineProperty(window, 'location', { + value: { href: '' }, + writable: true, + }); + Object.defineProperty(window.location, 'href', { + set: hrefSetter, + get: () => '', + }); + + render( + , + ); + + await user.click(screen.getByText('scatter')); + + expect(onTrackEvent).toHaveBeenCalledWith('tag_click', { + param: 'plot', + value: 'scatter', + source: 'spec_detail', + }); + }); + + // ------------------------------------------------------- + // 17. 
Expanding criteria categories + // ------------------------------------------------------- + it('expands criteria items on category click', async () => { + const user = userEvent.setup(); + const criteriaChecklist = { + visual_quality: { + score: 18, + max: 20, + items: [ + { id: 'vq1', name: 'Color harmony', score: 9, max: 10, passed: true, comment: 'Looks great' }, + ], + }, + }; + render(); + + await user.click(screen.getByRole('tab', { name: /90/i })); + + // Click the category to expand + await user.click(screen.getByText('visual quality')); + + // Now the item details should be visible + expect(screen.getByText('Color harmony')).toBeInTheDocument(); + expect(screen.getByText('9/10')).toBeInTheDocument(); + expect(screen.getByText('Looks great')).toBeInTheDocument(); + }); +}); diff --git a/app/src/hooks/useFilterState-extended.test.ts b/app/src/hooks/useFilterState-extended.test.ts new file mode 100644 index 0000000000..7f8214f5ab --- /dev/null +++ b/app/src/hooks/useFilterState-extended.test.ts @@ -0,0 +1,71 @@ +import { describe, it, expect } from 'vitest'; +import { isFiltersEmpty } from './useFilterState'; +import type { ActiveFilters } from '../types'; + +describe('isFiltersEmpty - extended', () => { + it('returns true for a single group with empty values', () => { + const filters: ActiveFilters = [{ category: 'spec', values: [] }]; + expect(isFiltersEmpty(filters)).toBe(true); + }); + + it('returns true for many groups all with empty values', () => { + const filters: ActiveFilters = [ + { category: 'lib', values: [] }, + { category: 'plot', values: [] }, + { category: 'spec', values: [] }, + { category: 'data', values: [] }, + { category: 'feat', values: [] }, + ]; + expect(isFiltersEmpty(filters)).toBe(true); + }); + + it('returns false when only the last group has values', () => { + const filters: ActiveFilters = [ + { category: 'lib', values: [] }, + { category: 'plot', values: [] }, + { category: 'spec', values: ['scatter-basic'] }, + ]; + 
expect(isFiltersEmpty(filters)).toBe(false); + }); + + it('returns false when first group has values and rest are empty', () => { + const filters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + { category: 'plot', values: [] }, + ]; + expect(isFiltersEmpty(filters)).toBe(false); + }); + + it('returns false with a single group containing one value', () => { + const filters: ActiveFilters = [ + { category: 'dep', values: ['numpy'] }, + ]; + expect(isFiltersEmpty(filters)).toBe(false); + }); + + it('returns false when all groups have values', () => { + const filters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + { category: 'plot', values: ['scatter', 'heatmap'] }, + { category: 'feat', values: ['legend'] }, + ]; + expect(isFiltersEmpty(filters)).toBe(false); + }); + + it('handles every FilterCategory with empty values as true', () => { + const filters: ActiveFilters = [ + { category: 'lib', values: [] }, + { category: 'spec', values: [] }, + { category: 'plot', values: [] }, + { category: 'data', values: [] }, + { category: 'dom', values: [] }, + { category: 'feat', values: [] }, + { category: 'dep', values: [] }, + { category: 'tech', values: [] }, + { category: 'pat', values: [] }, + { category: 'prep', values: [] }, + { category: 'style', values: [] }, + ]; + expect(isFiltersEmpty(filters)).toBe(true); + }); +}); diff --git a/app/src/pages/CatalogPage.test.tsx b/app/src/pages/CatalogPage.test.tsx new file mode 100644 index 0000000000..8e3184e4a1 --- /dev/null +++ b/app/src/pages/CatalogPage.test.tsx @@ -0,0 +1,139 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '../test-utils'; +import { CatalogPage } from './CatalogPage'; + +vi.mock('react-helmet-async', () => ({ + Helmet: ({ children }: { children: React.ReactNode }) => <>{children}, +})); + +vi.mock('../hooks', () => ({ + useAnalytics: () => ({ + trackPageview: vi.fn(), + trackEvent: vi.fn(), + }), + useAppData: 
() => ({ + specsData: [ + { id: 'bar-basic', title: 'Basic Bar Chart', description: 'A simple bar chart' }, + { id: 'scatter-basic', title: 'Basic Scatter Plot', description: 'A scatter plot' }, + ], + }), + useHomeState: () => ({ + saveScrollPosition: vi.fn(), + }), +})); + +vi.mock('../utils/responsiveImage', () => ({ + buildSrcSet: (url: string, _format: string) => url, + getFallbackSrc: (url: string) => url, + CATALOG_SIZES: '280px', +})); + +const mockImages = { + images: [ + { library: 'matplotlib', url: 'https://example.com/bar-basic/matplotlib/plot.png', spec_id: 'bar-basic' }, + { library: 'seaborn', url: 'https://example.com/bar-basic/seaborn/plot.png', spec_id: 'bar-basic' }, + { library: 'matplotlib', url: 'https://example.com/scatter-basic/matplotlib/plot.png', spec_id: 'scatter-basic' }, + ], +}; + +beforeEach(() => { + vi.restoreAllMocks(); +}); + +function mockFetchSuccess() { + global.fetch = vi.fn().mockResolvedValue({ + ok: true, + json: () => Promise.resolve(mockImages), + }); +} + +function mockFetchError() { + global.fetch = vi.fn().mockRejectedValue(new Error('Network error')); +} + +describe('CatalogPage', () => { + it('shows loading state initially', () => { + // Never-resolving fetch keeps loading=true + global.fetch = vi.fn().mockReturnValue(new Promise(() => {})); + render(); + + // Loading state renders Skeleton placeholders (MUI Skeleton uses role="progressbar" internally, but we can check for the skeleton structure) + // The loading branch renders multiple Skeleton elements; heading text should NOT be present + expect(screen.queryByText('catalog')).not.toBeInTheDocument(); + }); + + it('renders specs after successful fetch', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByText('Basic Bar Chart')).toBeInTheDocument(); + }); + expect(screen.getByText('Basic Scatter Plot')).toBeInTheDocument(); + expect(screen.getByText('A simple bar chart')).toBeInTheDocument(); + expect(screen.getByText('A 
scatter plot')).toBeInTheDocument(); + }); + + it('handles fetch error gracefully', async () => { + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + mockFetchError(); + render(); + + // After error, loading ends and we get the catalog heading (with 0 specs matched since no images loaded) + await waitFor(() => { + expect(screen.getByRole('heading', { level: 1 })).toBeInTheDocument(); + }); + expect(screen.getByText('0 specifications')).toBeInTheDocument(); + consoleSpy.mockRestore(); + }); + + it('renders breadcrumb navigation', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByRole('heading', { level: 1 })).toBeInTheDocument(); + }); + expect(screen.getByRole('navigation', { name: 'breadcrumb' })).toBeInTheDocument(); + }); + + it('renders footer', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByRole('heading', { level: 1 })).toBeInTheDocument(); + }); + expect(screen.getByText('github')).toBeInTheDocument(); + }); + + it('has page title text', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByRole('heading', { level: 1 })).toBeInTheDocument(); + }); + expect(screen.getByRole('heading', { level: 1 })).toHaveTextContent('catalog'); + }); + + it('shows specification count', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByText('2 specifications')).toBeInTheDocument(); + }); + }); + + it('calls fetch with /plots/filter endpoint', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(global.fetch).toHaveBeenCalled(); + }); + const fetchUrl = (global.fetch as ReturnType).mock.calls[0][0] as string; + expect(fetchUrl).toContain('/plots/filter'); + }); +}); diff --git a/app/src/pages/DebugPage.test.tsx b/app/src/pages/DebugPage.test.tsx new file mode 100644 index 0000000000..bdeb721f70 --- /dev/null +++ 
b/app/src/pages/DebugPage.test.tsx @@ -0,0 +1,92 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '../test-utils'; + +vi.mock('../components/Breadcrumb', () => ({ + Breadcrumb: () => , +})); + +import { DebugPage } from './DebugPage'; + +const mockDebugData = { + total_specs: 100, + total_implementations: 500, + coverage_percent: 85.5, + library_stats: [ + { id: 'matplotlib', name: 'Matplotlib', impl_count: 80, avg_score: 91.5, min_score: 60, max_score: 99 }, + ], + low_score_specs: [], + oldest_specs: [], + missing_preview_specs: [], + missing_tags_specs: [], + system: { + database_connected: true, + api_response_time_ms: 42, + timestamp: '2025-01-15T10:00:00Z', + total_specs_in_db: 100, + total_impls_in_db: 500, + }, + specs: [ + { + id: 'scatter-basic', + title: 'Basic Scatter', + updated: '2025-01-01', + avg_score: 92, + altair: 90, bokeh: 91, highcharts: null, letsplot: null, + matplotlib: 95, plotly: 88, plotnine: null, pygal: null, seaborn: 94, + }, + ], +}; + +describe('DebugPage', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('shows loading state initially', () => { + vi.stubGlobal('fetch', vi.fn(() => new Promise(() => {}))); + render(); + // During loading, shows skeletons + const skeletons = document.querySelectorAll('.MuiSkeleton-root'); + expect(skeletons.length).toBeGreaterThan(0); + }); + + it('renders debug data after fetch', async () => { + vi.stubGlobal('fetch', vi.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve(mockDebugData), + }) + )); + + render(); + + await waitFor(() => { + expect(screen.getByText('scatter-basic')).toBeInTheDocument(); + }); + }); + + it('handles fetch error gracefully', async () => { + vi.stubGlobal('fetch', vi.fn(() => + Promise.resolve({ ok: false, status: 500 }) + )); + + render(); + + await waitFor(() => { + expect(screen.getByText(/failed to load/i)).toBeInTheDocument(); + }); + }); + + it('renders breadcrumb after data 
loads', async () => { + vi.stubGlobal('fetch', vi.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve(mockDebugData), + }) + )); + render(); + await waitFor(() => { + expect(screen.getByTestId('breadcrumb')).toBeInTheDocument(); + }); + }); +}); diff --git a/app/src/pages/HomePage.test.tsx b/app/src/pages/HomePage.test.tsx new file mode 100644 index 0000000000..b1ded4ff58 --- /dev/null +++ b/app/src/pages/HomePage.test.tsx @@ -0,0 +1,91 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen } from '../test-utils'; + +// Mock hooks +vi.mock('../hooks', () => ({ + useAnalytics: () => ({ trackPageview: vi.fn(), trackEvent: vi.fn() }), + useInfiniteScroll: () => ({ loadMoreRef: { current: null } }), + useFilterState: () => ({ + activeFilters: [], + filterCounts: null, + globalCounts: null, + orCounts: [], + specTitles: {}, + allImages: [], + displayedImages: [], + hasMore: false, + loading: false, + error: '', + setDisplayedImages: vi.fn(), + setHasMore: vi.fn(), + setError: vi.fn(), + handleAddFilter: vi.fn(), + handleAddValueToGroup: vi.fn(), + handleRemoveFilter: vi.fn(), + handleRemoveGroup: vi.fn(), + handleRandom: vi.fn(), + randomAnimation: null, + }), + isFiltersEmpty: (f: unknown[]) => !f || f.length === 0, + useAppData: () => ({ specsData: [], librariesData: [], stats: null }), + useHomeState: () => ({ + homeStateRef: { current: { scrollY: 0, imageSize: 'normal', searchExpanded: false } }, + saveScrollPosition: vi.fn(), + setHomeState: vi.fn(), + homeState: { scrollY: 0, imageSize: 'normal', searchExpanded: false }, + }), +})); + +vi.mock('react-helmet-async', () => ({ + Helmet: ({ children }: { children: React.ReactNode }) =>
<div data-testid="helmet">{children}</div>
, +})); + +vi.mock('../components/Header', () => ({ + Header: () =>
<div data-testid="header">Header</div>
, +})); + +vi.mock('../components/Footer', () => ({ + Footer: () =>
<div data-testid="footer">Footer</div>
, +})); + +vi.mock('../components/FilterBar', () => ({ + FilterBar: () =>
<div data-testid="filterbar">FilterBar</div>
, +})); + +vi.mock('../components/ImagesGrid', () => ({ + ImagesGrid: () =>
<div data-testid="images-grid">ImagesGrid</div>
, +})); + +vi.mock('../components/PlotOfTheDay', () => ({ + PlotOfTheDay: () =>
<div data-testid="potd">PlotOfTheDay</div>
, +})); + +import { HomePage } from './HomePage'; + +describe('HomePage', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders the main components', () => { + render(); + expect(screen.getByTestId('header')).toBeInTheDocument(); + expect(screen.getByTestId('footer')).toBeInTheDocument(); + expect(screen.getByTestId('filterbar')).toBeInTheDocument(); + }); + + it('shows PlotOfTheDay when no filters active', () => { + render(); + expect(screen.getByTestId('potd')).toBeInTheDocument(); + }); + + it('renders images grid', () => { + render(); + expect(screen.getByTestId('images-grid')).toBeInTheDocument(); + }); + + it('renders Helmet for SEO', () => { + render(); + expect(screen.getByTestId('helmet')).toBeInTheDocument(); + }); +}); diff --git a/app/src/pages/InteractivePage.test.tsx b/app/src/pages/InteractivePage.test.tsx new file mode 100644 index 0000000000..93cdb64da3 --- /dev/null +++ b/app/src/pages/InteractivePage.test.tsx @@ -0,0 +1,96 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '@testing-library/react'; +import { ThemeProvider, createTheme } from '@mui/material/styles'; +import { MemoryRouter, Route, Routes } from 'react-router-dom'; +import { HelmetProvider } from 'react-helmet-async'; + +vi.mock('../hooks', () => ({ + useAnalytics: () => ({ trackPageview: vi.fn(), trackEvent: vi.fn() }), +})); + +vi.mock('../components/Breadcrumb', () => ({ + Breadcrumb: () => , +})); + +import { InteractivePage } from './InteractivePage'; + +const theme = createTheme(); + +function renderWithRoute(specId: string, library: string) { + return render( + + + + + } /> + + + + + ); +} + +const mockSpecData = { + id: 'scatter-basic', + title: 'Basic Scatter Plot', + implementations: [ + { library_id: 'plotly', preview_html: 'https://example.com/scatter.html' }, + { library_id: 'matplotlib', preview_html: null }, + ], +}; + +describe('InteractivePage', () => { + beforeEach(() => { + 
vi.clearAllMocks(); + }); + + it('shows loading spinner initially', () => { + vi.stubGlobal('fetch', vi.fn(() => new Promise(() => {}))); + renderWithRoute('scatter-basic', 'plotly'); + expect(screen.getByRole('progressbar')).toBeInTheDocument(); + }); + + it('renders iframe after successful fetch', async () => { + vi.stubGlobal('fetch', vi.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve(mockSpecData), + }) + )); + + renderWithRoute('scatter-basic', 'plotly'); + + await waitFor(() => { + const iframe = document.querySelector('iframe'); + expect(iframe).toBeTruthy(); + }); + }); + + it('shows error when no interactive HTML available', async () => { + vi.stubGlobal('fetch', vi.fn(() => + Promise.resolve({ + ok: true, + json: () => Promise.resolve({ + ...mockSpecData, + implementations: [{ library_id: 'matplotlib', preview_html: null }], + }), + }) + )); + + renderWithRoute('scatter-basic', 'plotly'); + + await waitFor(() => { + expect(screen.getByText(/no interactive/i)).toBeInTheDocument(); + }); + }); + + it('shows error on fetch failure', async () => { + vi.stubGlobal('fetch', vi.fn(() => Promise.reject(new Error('Network error')))); + + renderWithRoute('scatter-basic', 'plotly'); + + await waitFor(() => { + expect(screen.getByText(/failed|error/i)).toBeInTheDocument(); + }); + }); +}); diff --git a/app/src/pages/SpecPage.test.tsx b/app/src/pages/SpecPage.test.tsx new file mode 100644 index 0000000000..c0582e1cfa --- /dev/null +++ b/app/src/pages/SpecPage.test.tsx @@ -0,0 +1,200 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '../test-utils'; +import { SpecPage } from './SpecPage'; + +const mockNavigate = vi.fn(); +let mockParams: Record = { specId: 'scatter-basic' }; + +vi.mock('react-router-dom', async () => { + const actual = await vi.importActual('react-router-dom'); + return { + ...actual, + useParams: () => mockParams, + useNavigate: () => mockNavigate, + }; +}); + 
+vi.mock('react-helmet-async', () => ({ + Helmet: ({ children }: { children: React.ReactNode }) => <>{children}, +})); + +vi.mock('../hooks', () => ({ + useAnalytics: () => ({ + trackPageview: vi.fn(), + trackEvent: vi.fn(), + }), + useAppData: () => ({ + librariesData: [ + { id: 'matplotlib', name: 'Matplotlib' }, + { id: 'seaborn', name: 'Seaborn' }, + ], + }), + useCodeFetch: () => ({ + fetchCode: vi.fn().mockResolvedValue(null), + getCode: vi.fn().mockReturnValue(null), + isLoading: false, + }), +})); + +// Mock lazy-loaded components as simple divs +vi.mock('../components/SpecTabs', () => ({ + SpecTabs: () =>
SpecTabs
, +})); + +vi.mock('../components/SpecOverview', () => ({ + SpecOverview: () =>
<div data-testid="spec-overview">SpecOverview</div>
, +})); + +vi.mock('../components/SpecDetailView', () => ({ + SpecDetailView: () =>
<div data-testid="spec-detail-view">SpecDetailView</div>
, +})); + +const mockSpecData = { + id: 'scatter-basic', + title: 'Basic Scatter Plot', + description: 'A scatter plot with basic configuration', + implementations: [ + { + library_id: 'matplotlib', + library_name: 'Matplotlib', + preview_url: 'https://example.com/scatter-basic/matplotlib/plot.png', + quality_score: 8, + code: null, + }, + { + library_id: 'seaborn', + library_name: 'Seaborn', + preview_url: 'https://example.com/scatter-basic/seaborn/plot.png', + quality_score: 7, + code: null, + }, + ], +}; + +beforeEach(() => { + vi.restoreAllMocks(); + mockParams = { specId: 'scatter-basic' }; + mockNavigate.mockReset(); +}); + +function mockFetchSuccess(data = mockSpecData) { + global.fetch = vi.fn().mockResolvedValue({ + ok: true, + json: () => Promise.resolve(data), + }); +} + +function mockFetch404() { + global.fetch = vi.fn().mockResolvedValue({ + ok: false, + status: 404, + json: () => Promise.resolve({}), + }); +} + +function mockFetchError() { + global.fetch = vi.fn().mockRejectedValue(new Error('Network error')); +} + +describe('SpecPage', () => { + it('shows loading state initially', () => { + // Never-resolving fetch keeps loading=true + global.fetch = vi.fn().mockReturnValue(new Promise(() => {})); + render(); + + // Loading state does NOT show the spec title + expect(screen.queryByText('Basic Scatter Plot')).not.toBeInTheDocument(); + }); + + it('renders spec title after fetch', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByRole('heading', { level: 1 })).toHaveTextContent('Basic Scatter Plot'); + }); + }); + + it('shows 404 page when spec not found', async () => { + mockFetch404(); + render(); + + await waitFor(() => { + expect(screen.getByText('404')).toBeInTheDocument(); + }); + expect(screen.getByText('page not found')).toBeInTheDocument(); + }); + + it('handles fetch error', async () => { + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + mockFetchError(); + render(); + 
+ await waitFor(() => { + expect(screen.getByText('Failed to load spec')).toBeInTheDocument(); + }); + consoleSpy.mockRestore(); + }); + + it('renders breadcrumb with spec id', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByRole('navigation', { name: 'breadcrumb' })).toBeInTheDocument(); + }); + // Breadcrumb should contain the spec id + expect(screen.getByText('scatter-basic')).toBeInTheDocument(); + }); + + it('renders overview mode when no library in URL params', async () => { + mockParams = { specId: 'scatter-basic' }; + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByTestId('spec-overview')).toBeInTheDocument(); + }); + expect(screen.queryByTestId('spec-detail-view')).not.toBeInTheDocument(); + }); + + it('renders detail mode when library in URL params', async () => { + mockParams = { specId: 'scatter-basic', library: 'matplotlib' }; + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByTestId('spec-detail-view')).toBeInTheDocument(); + }); + expect(screen.queryByTestId('spec-overview')).not.toBeInTheDocument(); + }); + + it('renders description text', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByText('A scatter plot with basic configuration')).toBeInTheDocument(); + }); + }); + + it('renders footer', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(screen.getByText('github')).toBeInTheDocument(); + }); + }); + + it('calls fetch with correct spec endpoint', async () => { + mockFetchSuccess(); + render(); + + await waitFor(() => { + expect(global.fetch).toHaveBeenCalled(); + }); + const fetchUrl = (global.fetch as ReturnType).mock.calls[0][0] as string; + expect(fetchUrl).toContain('/specs/scatter-basic'); + }); +}); diff --git a/app/src/pages/StatsPage.test.tsx b/app/src/pages/StatsPage.test.tsx new file mode 100644 index 0000000000..4bb4e9f4d7 --- /dev/null +++ 
b/app/src/pages/StatsPage.test.tsx @@ -0,0 +1,206 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '../test-utils'; +import { StatsPage } from './StatsPage'; + +vi.mock('react-helmet-async', () => ({ + Helmet: ({ children }: { children: React.ReactNode }) => <>{children}, +})); + +vi.mock('../hooks', () => ({ + useAnalytics: () => ({ + trackPageview: vi.fn(), + trackEvent: vi.fn(), + }), +})); + +const mockDashboard = { + total_specs: 142, + total_implementations: 987, + total_interactive: 53, + total_lines_of_code: 245_600, + avg_quality_score: 82.5, + coverage_percent: 73, + library_stats: [ + { + id: 'matplotlib', + name: 'matplotlib', + impl_count: 120, + avg_score: 85, + min_score: 60, + max_score: 98, + score_buckets: { '50-55': 1, '75-80': 10, '90-95': 5 }, + loc_buckets: { '0-20': 2, '40-60': 5 }, + avg_loc: 78, + }, + ], + coverage_matrix: [ + { + spec_id: 'scatter-basic', + title: 'Basic Scatter Plot', + libraries: { + matplotlib: { score: 90, has_impl: true }, + plotly: { score: null, has_impl: false }, + }, + }, + ], + top_implementations: [ + { + spec_id: 'scatter-basic', + spec_title: 'Basic Scatter Plot', + library_id: 'matplotlib', + quality_score: 95, + preview_url: 'https://example.com/img.png', + }, + ], + tag_distribution: { + plot_type: { scatter: 42, line: 30 }, + data_type: { numeric: 80 }, + }, + score_distribution: { '50-60': 5, '60-70': 10, '70-80': 20, '80-90': 30, '90-100': 15 }, + timeline: [ + { month: '2025-01', count: 10 }, + { month: '2025-02', count: 20 }, + ], +}; + +function mockFetchSuccess() { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + json: () => Promise.resolve(mockDashboard), + }), + ); +} + +function mockFetchError() { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: false, + status: 500, + }), + ); +} + +describe('StatsPage', () => { + beforeEach(() => { + vi.restoreAllMocks(); + }); + + it('renders loading state 
initially', () => { + // fetch never resolves so component stays in loading + vi.stubGlobal('fetch', vi.fn().mockReturnValue(new Promise(() => {}))); + + render(); + + expect(screen.getByText('loading stats...')).toBeInTheDocument(); + }); + + it('renders dashboard with mock data after fetch', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('specifications')).toBeInTheDocument(); + }); + + expect(screen.getByText('implementations')).toBeInTheDocument(); + expect(screen.getByText('libraries')).toBeInTheDocument(); + // "coverage" appears both as a stat card label and as a section heading + expect(screen.getAllByText('coverage').length).toBeGreaterThanOrEqual(2); + expect(screen.getByText('top rated')).toBeInTheDocument(); + expect(screen.getByText('tags')).toBeInTheDocument(); + }); + + it('handles fetch error gracefully', async () => { + mockFetchError(); + + render(); + + await waitFor(() => { + expect(screen.getByText(/failed to load stats/)).toBeInTheDocument(); + }); + }); + + it('shows all 6 summary stat values', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('specifications')).toBeInTheDocument(); + }); + + // total_specs: 142 + expect(screen.getByText('142')).toBeInTheDocument(); + // total_implementations: 987 + expect(screen.getByText('987')).toBeInTheDocument(); + // total_interactive: 53 + expect(screen.getByText('53')).toBeInTheDocument(); + // total_lines_of_code: 245600 => formatNum => "245.6K" + expect(screen.getByText('245.6K')).toBeInTheDocument(); + // avg_quality_score: 82.5 => formatNum(82.5) => "82.5" (toLocaleString) + expect(screen.getByText('82.5')).toBeInTheDocument(); + // coverage_percent: 73 => "73%" + expect(screen.getByText('73%')).toBeInTheDocument(); + }); + + it('renders breadcrumb navigation', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByRole('navigation', { name: 
'breadcrumb' })).toBeInTheDocument(); + }); + }); + + it('renders footer with github link', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('github')).toBeInTheDocument(); + }); + }); + + it('renders top implementation cards', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('Basic Scatter Plot')).toBeInTheDocument(); + }); + + // "matplotlib" appears in library stats and in top implementation cards + expect(screen.getAllByText('matplotlib').length).toBeGreaterThanOrEqual(1); + expect(screen.getByText('95')).toBeInTheDocument(); + }); + + it('renders tag distribution categories', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('plot type')).toBeInTheDocument(); + }); + + expect(screen.getByText('data type')).toBeInTheDocument(); + expect(screen.getByText('scatter')).toBeInTheDocument(); + }); + + it('renders timeline section', async () => { + mockFetchSuccess(); + + render(); + + await waitFor(() => { + expect(screen.getByText('timeline')).toBeInTheDocument(); + }); + }); +}); diff --git a/app/src/utils/filters-extended.test.ts b/app/src/utils/filters-extended.test.ts new file mode 100644 index 0000000000..a7bb9e1a1a --- /dev/null +++ b/app/src/utils/filters-extended.test.ts @@ -0,0 +1,193 @@ +import { describe, it, expect, vi } from 'vitest'; +import { + getCounts, + getSelectedValuesForCategory, + getAvailableValues, + getAvailableValuesForGroup, + getSearchResults, +} from './filters'; +import type { FilterCounts, ActiveFilters } from '../types'; + +const mockFilterCounts: FilterCounts = { + lib: { matplotlib: 10, seaborn: 8, plotly: 7, bokeh: 3 }, + spec: { 'scatter-basic': 5, 'heatmap-correlation': 4 }, + plot: { scatter: 8, heatmap: 4, bar: 6 }, + data: { tabular: 12, timeseries: 5 }, + dom: { science: 3 }, + feat: { legend: 7, colorbar: 4 }, + dep: {}, + tech: {}, + pat: {}, + prep: {}, + style: {}, 
+}; + +describe('getAvailableValues', () => { + it('returns all values sorted by count when no filters are active', () => { + const result = getAvailableValues(mockFilterCounts, [], 'lib'); + + expect(result).toEqual([ + ['matplotlib', 10], + ['seaborn', 8], + ['plotly', 7], + ['bokeh', 3], + ]); + }); + + it('excludes values that are already selected', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib', 'plotly'] }, + ]; + const result = getAvailableValues(mockFilterCounts, activeFilters, 'lib'); + + expect(result).toEqual([ + ['seaborn', 8], + ['bokeh', 3], + ]); + }); + + it('excludes values selected across multiple groups of the same category', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + { category: 'lib', values: ['seaborn'] }, + ]; + const result = getAvailableValues(mockFilterCounts, activeFilters, 'lib'); + + expect(result).toEqual([ + ['plotly', 7], + ['bokeh', 3], + ]); + }); + + it('returns empty array when filterCounts is null', () => { + const result = getAvailableValues(null, [], 'lib'); + expect(result).toEqual([]); + }); + + it('returns empty array for a category with no counts', () => { + const result = getAvailableValues(mockFilterCounts, [], 'dep'); + expect(result).toEqual([]); + }); + + it('does not exclude values from other categories', () => { + const activeFilters: ActiveFilters = [ + { category: 'plot', values: ['scatter'] }, + ]; + const result = getAvailableValues(mockFilterCounts, activeFilters, 'lib'); + + // All lib values should still be available + expect(result).toHaveLength(4); + expect(result[0][0]).toBe('matplotlib'); + }); + + it('returns empty array when all values are selected', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib', 'seaborn', 'plotly', 'bokeh'] }, + ]; + const result = getAvailableValues(mockFilterCounts, activeFilters, 'lib'); + expect(result).toEqual([]); + }); +}); + 
+describe('getAvailableValuesForGroup', () => { + it('returns available values for a group with preview counts', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + ]; + const orCounts = [{ seaborn: 5, plotly: 3 }]; + const currentTotal = 10; + + const result = getAvailableValuesForGroup(0, activeFilters, orCounts, currentTotal); + + // Values should be sorted by previewCount (currentTotal + count) descending + expect(result).toEqual([ + ['seaborn', 15], + ['plotly', 13], + ]); + }); + + it('returns empty array for invalid group index', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + ]; + const result = getAvailableValuesForGroup(5, activeFilters, [], 10); + expect(result).toEqual([]); + }); + + it('excludes values already in the group', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib', 'seaborn'] }, + ]; + const orCounts = [{ matplotlib: 10, seaborn: 8, plotly: 3 }]; + const currentTotal = 10; + + const result = getAvailableValuesForGroup(0, activeFilters, orCounts, currentTotal); + + expect(result).toEqual([['plotly', 13]]); + }); + + it('returns empty array when orCounts for the group is empty', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + ]; + const orCounts = [{}]; + const currentTotal = 10; + + const result = getAvailableValuesForGroup(0, activeFilters, orCounts, currentTotal); + expect(result).toEqual([]); + }); + + it('handles missing orCounts entry gracefully', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + { category: 'plot', values: ['scatter'] }, + ]; + // Only one entry in orCounts, so index 1 is missing + const orCounts = [{ seaborn: 5 }]; + const currentTotal = 10; + + const result = getAvailableValuesForGroup(1, activeFilters, orCounts, currentTotal); + expect(result).toEqual([]); + }); + + it('adds 
currentTotal to each count for preview', () => { + const activeFilters: ActiveFilters = [ + { category: 'lib', values: ['matplotlib'] }, + ]; + const orCounts = [{ plotly: 7 }]; + const currentTotal = 25; + + const result = getAvailableValuesForGroup(0, activeFilters, orCounts, currentTotal); + expect(result).toEqual([['plotly', 32]]); + }); +}); + +describe('getSearchResults - additional coverage', () => { + it('returns empty array for whitespace-only query', () => { + const result = getSearchResults(mockFilterCounts, [], ' ', null); + expect(result).toEqual([]); + }); + + it('searches across all categories when selectedCategory is null', () => { + const result = getSearchResults(mockFilterCounts, [], 'scatter', null); + const categories = new Set(result.map((r) => r.category)); + // Should find results in both spec and plot categories + expect(categories.size).toBeGreaterThanOrEqual(1); + }); + + it('limits results to selectedCategory when provided', () => { + const result = getSearchResults(mockFilterCounts, [], 'scatter', 'plot'); + result.forEach((r) => { + expect(r.category).toBe('plot'); + }); + }); + + it('skips categories with no items after excluding selected', () => { + const activeFilters: ActiveFilters = [ + { category: 'dom', values: ['science'] }, + ]; + // 'dom' has only 'science' which is selected - should find no dom results + const result = getSearchResults(mockFilterCounts, activeFilters, 'science', 'dom'); + expect(result).toEqual([]); + }); +}); From 0c9a21edfe78c7e1f4c6c076210b1292827d4f7e Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 21:22:08 +0000 Subject: [PATCH 3/5] Fix lint errors and review feedback in test suite - Remove unused pytest imports from 6 Python test files - Remove unused Impl/BaseRepository imports from test_repositories.py - Remove unused vi/getCounts/getSelectedValuesForCategory from filters-extended.test.ts - Fix global -> globalThis in SpecTabs.test.tsx for ESLint compatibility - Add configurable: true to 
window.location mock in SpecTabs.test.tsx - Use clear=True in patch.dict for test isolation in test_config_resolve.py - Use precise assertions in test_seo_helpers.py to fix CodeQL URL warnings - Fix import sorting via ruff auto-fix - Remove unused datetime import in test_insights_helpers.py https://claude.ai/code/session_01KhAhJKpEoqCzmWzcALSfW6 --- app/src/components/SpecTabs.test.tsx | 9 ++++++--- app/src/utils/filters-extended.test.ts | 4 +--- tests/unit/api/test_analytics_extended.py | 8 +------- tests/unit/api/test_insights_helpers.py | 11 ++--------- tests/unit/api/test_plots_helpers.py | 2 -- tests/unit/api/test_schemas.py | 2 -- tests/unit/api/test_seo_helpers.py | 17 ++++++++--------- .../automation/scripts/test_sync_helpers.py | 8 +------- tests/unit/core/database/test_repositories.py | 3 +-- tests/unit/core/test_config_resolve.py | 10 +++++----- 10 files changed, 25 insertions(+), 49 deletions(-) diff --git a/app/src/components/SpecTabs.test.tsx b/app/src/components/SpecTabs.test.tsx index a4bebec38a..48817d65fc 100644 --- a/app/src/components/SpecTabs.test.tsx +++ b/app/src/components/SpecTabs.test.tsx @@ -17,7 +17,7 @@ vi.mock('../constants', () => ({ beforeEach(() => { vi.restoreAllMocks(); // Mock fetch globally - return tag counts - global.fetch = vi.fn().mockResolvedValue({ + globalThis.fetch = vi.fn().mockResolvedValue({ ok: true, json: () => Promise.resolve({ @@ -362,7 +362,7 @@ describe('SpecTabs', () => { expect(screen.getByText('scatter')).toBeInTheDocument(); // Fetch is mocked for tag counts (may be called if cache is empty) - expect(global.fetch).toBeDefined(); + expect(globalThis.fetch).toBeDefined(); }); // ------------------------------------------------------- @@ -409,15 +409,18 @@ describe('SpecTabs', () => { it('fires onTrackEvent and navigates on tag click', async () => { const user = userEvent.setup(); const onTrackEvent = vi.fn(); - // Mock window.location.href setter + // Mock window.location.href setter to prevent jsdom 
navigation errors. + // Use configurable: true so the property can be redefined if needed. const hrefSetter = vi.fn(); Object.defineProperty(window, 'location', { value: { href: '' }, writable: true, + configurable: true, }); Object.defineProperty(window.location, 'href', { set: hrefSetter, get: () => '', + configurable: true, }); render( diff --git a/app/src/utils/filters-extended.test.ts b/app/src/utils/filters-extended.test.ts index a7bb9e1a1a..887cca4bc8 100644 --- a/app/src/utils/filters-extended.test.ts +++ b/app/src/utils/filters-extended.test.ts @@ -1,7 +1,5 @@ -import { describe, it, expect, vi } from 'vitest'; +import { describe, it, expect } from 'vitest'; import { - getCounts, - getSelectedValuesForCategory, getAvailableValues, getAvailableValuesForGroup, getSearchResults, diff --git a/tests/unit/api/test_analytics_extended.py b/tests/unit/api/test_analytics_extended.py index 667bd17212..6b093ac9a3 100644 --- a/tests/unit/api/test_analytics_extended.py +++ b/tests/unit/api/test_analytics_extended.py @@ -4,13 +4,7 @@ Covers edge cases and additional platform patterns. """ -import pytest - -from api.analytics import ( - PLATFORM_PATTERNS, - _detect_whatsapp_variant, - detect_platform, -) +from api.analytics import PLATFORM_PATTERNS, _detect_whatsapp_variant, detect_platform class TestDetectWhatsappVariant: diff --git a/tests/unit/api/test_insights_helpers.py b/tests/unit/api/test_insights_helpers.py index c6b6483917..683a42dc63 100644 --- a/tests/unit/api/test_insights_helpers.py +++ b/tests/unit/api/test_insights_helpers.py @@ -5,16 +5,9 @@ that don't require database or HTTP setup. 
""" -from datetime import datetime, timezone +from datetime import timezone -import pytest - -from api.routers.insights import ( - _collect_impl_tags, - _flatten_tags, - _parse_iso, - _score_bucket, -) +from api.routers.insights import _collect_impl_tags, _flatten_tags, _parse_iso, _score_bucket class TestScoreBucket: diff --git a/tests/unit/api/test_plots_helpers.py b/tests/unit/api/test_plots_helpers.py index f13c48d958..58c6ae0636 100644 --- a/tests/unit/api/test_plots_helpers.py +++ b/tests/unit/api/test_plots_helpers.py @@ -6,8 +6,6 @@ from unittest.mock import MagicMock -import pytest - from api.routers.plots import ( _build_cache_key, _build_impl_lookup, diff --git a/tests/unit/api/test_schemas.py b/tests/unit/api/test_schemas.py index 433fc23194..d07d00d279 100644 --- a/tests/unit/api/test_schemas.py +++ b/tests/unit/api/test_schemas.py @@ -4,8 +4,6 @@ Validates schema creation, defaults, and serialization. """ -import pytest - from api.schemas import ( FilterCountsResponse, FilteredPlotsResponse, diff --git a/tests/unit/api/test_seo_helpers.py b/tests/unit/api/test_seo_helpers.py index ab1cd6b2b2..da535bcbc0 100644 --- a/tests/unit/api/test_seo_helpers.py +++ b/tests/unit/api/test_seo_helpers.py @@ -7,8 +7,6 @@ from datetime import datetime from unittest.mock import MagicMock -import pytest - from api.routers.seo import BOT_HTML_TEMPLATE, _build_sitemap_xml, _lastmod @@ -36,11 +34,11 @@ def test_empty_specs(self) -> None: result = _build_sitemap_xml([]) assert 'https://pyplots.ai/" in result + assert "https://pyplots.ai/catalog" in result + assert "https://pyplots.ai/mcp" in result + assert "https://pyplots.ai/legal" in result + assert "https://pyplots.ai/stats" in result assert "" in result def test_spec_with_impls(self) -> None: @@ -141,8 +139,9 @@ def test_template_has_required_meta_tags(self) -> None: assert "Test Description" in result def test_template_has_canonical(self) -> None: + url = "https://pyplots.ai/" result = BOT_HTML_TEMPLATE.format( - 
title="t", description="d", image="i", url="https://pyplots.ai/" + title="t", description="d", image="i", url=url ) assert 'rel="canonical"' in result - assert "https://pyplots.ai/" in result + assert url in result diff --git a/tests/unit/automation/scripts/test_sync_helpers.py b/tests/unit/automation/scripts/test_sync_helpers.py index f55538ea99..8f023f6260 100644 --- a/tests/unit/automation/scripts/test_sync_helpers.py +++ b/tests/unit/automation/scripts/test_sync_helpers.py @@ -4,13 +4,7 @@ Focuses on _validate_quality_score, _parse_markdown_section, and _validate_spec_id. """ -import pytest - -from automation.scripts.sync_to_postgres import ( - _parse_markdown_section, - _validate_quality_score, - _validate_spec_id, -) +from automation.scripts.sync_to_postgres import _parse_markdown_section, _validate_quality_score, _validate_spec_id class TestValidateQualityScore: diff --git a/tests/unit/core/database/test_repositories.py b/tests/unit/core/database/test_repositories.py index de386f3b42..1a122bb459 100644 --- a/tests/unit/core/database/test_repositories.py +++ b/tests/unit/core/database/test_repositories.py @@ -7,12 +7,11 @@ import pytest from sqlalchemy.ext.asyncio import AsyncSession -from core.database.models import Impl, Library, Spec +from core.database.models import Library, Spec from core.database.repositories import ( IMPL_UPDATABLE_FIELDS, LIBRARY_UPDATABLE_FIELDS, SPEC_UPDATABLE_FIELDS, - BaseRepository, ImplRepository, LibraryRepository, SpecRepository, diff --git a/tests/unit/core/test_config_resolve.py b/tests/unit/core/test_config_resolve.py index ac3e8c4e85..441ab0eaca 100644 --- a/tests/unit/core/test_config_resolve.py +++ b/tests/unit/core/test_config_resolve.py @@ -17,7 +17,7 @@ class TestResolveModel: @pytest.fixture def settings(self) -> Settings: """Create a Settings instance with default model mappings.""" - with patch.dict("os.environ", {}, clear=False): + with patch.dict("os.environ", {}, clear=True): return Settings( 
cli_model_claude_small="claude-haiku", cli_model_claude_medium="claude-sonnet", @@ -50,24 +50,24 @@ def test_gemini_medium(self, settings: Settings) -> None: def test_unknown_cli_returns_tier(self) -> None: """Unknown CLI should return the tier unchanged (pass-through).""" - with patch.dict("os.environ", {}, clear=False): + with patch.dict("os.environ", {}, clear=True): s = Settings() assert s.resolve_model("unknown-cli", "small") == "small" def test_unknown_tier_returns_tier(self) -> None: """Unknown tier should return the tier unchanged (pass-through).""" - with patch.dict("os.environ", {}, clear=False): + with patch.dict("os.environ", {}, clear=True): s = Settings() assert s.resolve_model("claude", "xlarge") == "xlarge" def test_unknown_cli_and_tier(self) -> None: - with patch.dict("os.environ", {}, clear=False): + with patch.dict("os.environ", {}, clear=True): s = Settings() assert s.resolve_model("unknown", "unknown") == "unknown" def test_exact_model_name_passthrough(self) -> None: """When tier is an exact model name, it's returned as-is.""" - with patch.dict("os.environ", {}, clear=False): + with patch.dict("os.environ", {}, clear=True): s = Settings() result = s.resolve_model("claude", "claude-3-opus-20240229") assert result == "claude-3-opus-20240229" From 33732b0233c398b98eec30e76eae08258344a0c9 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 21:24:39 +0000 Subject: [PATCH 4/5] Fix ruff formatting and import sorting for CI lint pass - Auto-format 7 files with ruff format (test files + agentic modules) - Fix import sorting in test_models.py (ruff I001) https://claude.ai/code/session_01KhAhJKpEoqCzmWzcALSfW6 --- agentic/workflows/modules/agent.py | 6 ++-- agentic/workflows/modules/orchestrator.py | 2 +- agentic/workflows/modules/state.py | 2 +- tests/unit/api/test_plots_helpers.py | 35 +++++------------------ tests/unit/api/test_schemas.py | 9 ++---- tests/unit/api/test_seo_helpers.py | 4 +-- tests/unit/core/database/test_models.py | 18 
+++--------- 7 files changed, 19 insertions(+), 57 deletions(-) diff --git a/agentic/workflows/modules/agent.py b/agentic/workflows/modules/agent.py index 947246f38d..950cb43046 100644 --- a/agentic/workflows/modules/agent.py +++ b/agentic/workflows/modules/agent.py @@ -144,7 +144,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except (json.JSONDecodeError, ValueError): + except json.JSONDecodeError, ValueError: pass # Strategy 2: Strip markdown code fences @@ -162,7 +162,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except (json.JSONDecodeError, ValueError): + except json.JSONDecodeError, ValueError: pass # Strategy 3: Find first JSON array or object in output @@ -182,7 +182,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except (json.JSONDecodeError, ValueError): + except json.JSONDecodeError, ValueError: continue raise json.JSONDecodeError("No valid JSON found in output", output, 0) diff --git a/agentic/workflows/modules/orchestrator.py b/agentic/workflows/modules/orchestrator.py index a3f5cf5b59..1934234d88 100644 --- a/agentic/workflows/modules/orchestrator.py +++ b/agentic/workflows/modules/orchestrator.py @@ -45,5 +45,5 @@ def extract_run_id(stdout: str) -> str | None: try: data = json.loads(stdout.strip()) return data.get("run_id") - except (json.JSONDecodeError, ValueError): + except json.JSONDecodeError, ValueError: return None diff --git a/agentic/workflows/modules/state.py b/agentic/workflows/modules/state.py index ff81d60746..47387c54c3 100644 --- a/agentic/workflows/modules/state.py +++ b/agentic/workflows/modules/state.py @@ -172,7 
+172,7 @@ def from_stdin(cls) -> Optional["WorkflowState"]: state = cls(run_id=run_id, prompt=data.get("prompt", "")) state.data = data return state - except (json.JSONDecodeError, EOFError): + except json.JSONDecodeError, EOFError: return None def to_stdout(self) -> None: diff --git a/tests/unit/api/test_plots_helpers.py b/tests/unit/api/test_plots_helpers.py index 58c6ae0636..b760b9e62d 100644 --- a/tests/unit/api/test_plots_helpers.py +++ b/tests/unit/api/test_plots_helpers.py @@ -134,19 +134,13 @@ def test_single_group_no_match(self) -> None: def test_multiple_groups_and_logic(self) -> None: spec_lookup = {"s1": {"tags": {"plot_type": ["scatter"], "domain": ["statistics"]}}} impl_lookup = {} - groups = [ - {"category": "plot", "values": ["scatter"]}, - {"category": "dom", "values": ["statistics"]}, - ] + groups = [{"category": "plot", "values": ["scatter"]}, {"category": "dom", "values": ["statistics"]}] assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is True def test_multiple_groups_one_fails(self) -> None: spec_lookup = {"s1": {"tags": {"plot_type": ["scatter"], "domain": ["finance"]}}} impl_lookup = {} - groups = [ - {"category": "plot", "values": ["scatter"]}, - {"category": "dom", "values": ["statistics"]}, - ] + groups = [{"category": "plot", "values": ["scatter"]}, {"category": "dom", "values": ["statistics"]}] assert _image_matches_groups("s1", "matplotlib", groups, spec_lookup, impl_lookup) is False def test_spec_not_in_lookup(self) -> None: @@ -223,10 +217,7 @@ def test_single_group(self) -> None: assert result == "filter:lib=matplotlib" def test_multiple_groups_sorted(self) -> None: - groups = [ - {"category": "plot", "values": ["scatter"]}, - {"category": "lib", "values": ["matplotlib"]}, - ] + groups = [{"category": "plot", "values": ["scatter"]}, {"category": "lib", "values": ["matplotlib"]}] result = _build_cache_key(groups) assert result == "filter:lib=matplotlib:plot=scatter" @@ -236,14 +227,8 @@ def 
test_values_sorted(self) -> None: assert result == "filter:lib=matplotlib,seaborn" def test_stable_key_different_order(self) -> None: - groups1 = [ - {"category": "lib", "values": ["matplotlib"]}, - {"category": "plot", "values": ["scatter"]}, - ] - groups2 = [ - {"category": "plot", "values": ["scatter"]}, - {"category": "lib", "values": ["matplotlib"]}, - ] + groups1 = [{"category": "lib", "values": ["matplotlib"]}, {"category": "plot", "values": ["scatter"]}] + groups2 = [{"category": "plot", "values": ["scatter"]}, {"category": "lib", "values": ["matplotlib"]}] assert _build_cache_key(groups1) == _build_cache_key(groups2) @@ -374,10 +359,7 @@ def test_no_filters_returns_all(self) -> None: assert len(result) == 1 def test_filter_by_lib(self) -> None: - images = [ - {"spec_id": "s1", "library": "matplotlib"}, - {"spec_id": "s1", "library": "seaborn"}, - ] + images = [{"spec_id": "s1", "library": "matplotlib"}, {"spec_id": "s1", "library": "seaborn"}] spec_lookup = {"s1": {"tags": {}}} impl_lookup = {} groups = [{"category": "lib", "values": ["matplotlib"]}] @@ -432,10 +414,7 @@ class TestCalculateOrCounts: def test_or_counts_for_single_group(self) -> None: filter_groups = [{"category": "lib", "values": ["matplotlib"]}] - all_images = [ - {"spec_id": "s1", "library": "matplotlib"}, - {"spec_id": "s1", "library": "seaborn"}, - ] + all_images = [{"spec_id": "s1", "library": "matplotlib"}, {"spec_id": "s1", "library": "seaborn"}] spec_id_to_tags = {"s1": {}} spec_lookup = {"s1": {"tags": {}}} impl_lookup = {} diff --git a/tests/unit/api/test_schemas.py b/tests/unit/api/test_schemas.py index d07d00d279..1e9f70e382 100644 --- a/tests/unit/api/test_schemas.py +++ b/tests/unit/api/test_schemas.py @@ -139,10 +139,7 @@ def test_defaults_empty(self) -> None: assert counts.style == {} def test_with_counts(self) -> None: - counts = FilterCountsResponse( - lib={"matplotlib": 5, "seaborn": 3}, - plot={"scatter": 8}, - ) + counts = FilterCountsResponse(lib={"matplotlib": 5, 
"seaborn": 3}, plot={"scatter": 8}) assert counts.lib["matplotlib"] == 5 @@ -150,9 +147,7 @@ class TestFilteredPlotsResponse: """Tests for FilteredPlotsResponse schema.""" def test_minimal(self) -> None: - resp = FilteredPlotsResponse( - total=0, images=[], counts={}, globalCounts={}, orCounts=[] - ) + resp = FilteredPlotsResponse(total=0, images=[], counts={}, globalCounts={}, orCounts=[]) assert resp.total == 0 assert resp.offset == 0 assert resp.limit is None diff --git a/tests/unit/api/test_seo_helpers.py b/tests/unit/api/test_seo_helpers.py index da535bcbc0..58536d8728 100644 --- a/tests/unit/api/test_seo_helpers.py +++ b/tests/unit/api/test_seo_helpers.py @@ -140,8 +140,6 @@ def test_template_has_required_meta_tags(self) -> None: def test_template_has_canonical(self) -> None: url = "https://pyplots.ai/" - result = BOT_HTML_TEMPLATE.format( - title="t", description="d", image="i", url=url - ) + result = BOT_HTML_TEMPLATE.format(title="t", description="d", image="i", url=url) assert 'rel="canonical"' in result assert url in result diff --git a/tests/unit/core/database/test_models.py b/tests/unit/core/database/test_models.py index 5098b407cf..d3c16d1585 100644 --- a/tests/unit/core/database/test_models.py +++ b/tests/unit/core/database/test_models.py @@ -7,14 +7,7 @@ import pytest from sqlalchemy.ext.asyncio import AsyncSession -from core.database.models import ( - Impl, - Library, - MAX_LIBRARY_ID_LENGTH, - MAX_SPEC_ID_LENGTH, - REVIEW_VERDICTS, - Spec, -) +from core.database.models import MAX_LIBRARY_ID_LENGTH, MAX_SPEC_ID_LENGTH, REVIEW_VERDICTS, Impl, Library, Spec class TestModelConstants: @@ -66,6 +59,7 @@ async def test_persist_and_retrieve(self, test_session: AsyncSession) -> None: await test_session.commit() from sqlalchemy import select + result = await test_session.execute(select(Spec).where(Spec.id == "test-spec")) retrieved = result.scalar_one() assert retrieved.title == "Test Spec" @@ -140,16 +134,12 @@ async def 
test_persist_with_foreign_keys(self, test_session: AsyncSession) -> No test_session.add_all([lib, spec]) await test_session.commit() - impl = Impl( - spec_id="scatter-basic", - library_id="matplotlib", - code="import matplotlib", - quality_score=90.0, - ) + impl = Impl(spec_id="scatter-basic", library_id="matplotlib", code="import matplotlib", quality_score=90.0) test_session.add(impl) await test_session.commit() from sqlalchemy import select + result = await test_session.execute(select(Impl).where(Impl.spec_id == "scatter-basic")) retrieved = result.scalar_one() assert retrieved.quality_score == 90.0 From 4057e24e92568c3d651cb029a407c7ce13f8abab Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 12 Apr 2026 21:41:17 +0000 Subject: [PATCH 5/5] Fix agentic module syntax errors and locale-sensitive test assertion - Fix Python 2-style except syntax in orchestrator.py, agent.py, state.py: `except X, Y:` -> `except (X, Y):` with fmt: skip to prevent ruff formatter bug from reverting the fix - Make StatsPage quality score assertion locale-tolerant (82.5 -> /82[.,]5/) https://claude.ai/code/session_01KhAhJKpEoqCzmWzcALSfW6 --- agentic/workflows/modules/agent.py | 6 +++--- agentic/workflows/modules/orchestrator.py | 2 +- agentic/workflows/modules/state.py | 2 +- app/src/pages/StatsPage.test.tsx | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/agentic/workflows/modules/agent.py b/agentic/workflows/modules/agent.py index 950cb43046..3224af1d3b 100644 --- a/agentic/workflows/modules/agent.py +++ b/agentic/workflows/modules/agent.py @@ -144,7 +144,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): # fmt: skip pass # Strategy 2: Strip markdown code fences @@ -162,7 +162,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: 
return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): # fmt: skip pass # Strategy 3: Find first JSON array or object in output @@ -182,7 +182,7 @@ def parse_json(output: str, target_type: Type[T] = None) -> Any: return [target_type.model_validate(item) for item in parsed] return target_type.model_validate(parsed) return parsed - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): # fmt: skip continue raise json.JSONDecodeError("No valid JSON found in output", output, 0) diff --git a/agentic/workflows/modules/orchestrator.py b/agentic/workflows/modules/orchestrator.py index 1934234d88..b262c844dd 100644 --- a/agentic/workflows/modules/orchestrator.py +++ b/agentic/workflows/modules/orchestrator.py @@ -45,5 +45,5 @@ def extract_run_id(stdout: str) -> str | None: try: data = json.loads(stdout.strip()) return data.get("run_id") - except json.JSONDecodeError, ValueError: + except (json.JSONDecodeError, ValueError): # fmt: skip return None diff --git a/agentic/workflows/modules/state.py b/agentic/workflows/modules/state.py index 47387c54c3..677651be65 100644 --- a/agentic/workflows/modules/state.py +++ b/agentic/workflows/modules/state.py @@ -172,7 +172,7 @@ def from_stdin(cls) -> Optional["WorkflowState"]: state = cls(run_id=run_id, prompt=data.get("prompt", "")) state.data = data return state - except json.JSONDecodeError, EOFError: + except (json.JSONDecodeError, EOFError): # fmt: skip return None def to_stdout(self) -> None: diff --git a/app/src/pages/StatsPage.test.tsx b/app/src/pages/StatsPage.test.tsx index 4bb4e9f4d7..ae87e70994 100644 --- a/app/src/pages/StatsPage.test.tsx +++ b/app/src/pages/StatsPage.test.tsx @@ -141,8 +141,8 @@ describe('StatsPage', () => { expect(screen.getByText('53')).toBeInTheDocument(); // total_lines_of_code: 245600 => formatNum => "245.6K" 
expect(screen.getByText('245.6K')).toBeInTheDocument(); - // avg_quality_score: 82.5 => formatNum(82.5) => "82.5" (toLocaleString) - expect(screen.getByText('82.5')).toBeInTheDocument(); + // avg_quality_score: 82.5 => formatNum(82.5) => locale-sensitive decimal separator + expect(screen.getByText(/82[.,]5/)).toBeInTheDocument(); // coverage_percent: 73 => "73%" expect(screen.getByText('73%')).toBeInTheDocument(); });