Skip to content

Commit 1c46128

Browse files
feat: update proxy and SEO handling, enhance analytics tracking
- Simplify HTTPException handling in proxy.py
- Update sitemap generation to include catalog page and new spec URLs in seo.py
- Refactor analytics tracking in CatalogPage and InteractivePage components
- Remove unused FullscreenModal import in HomePage
- Clean up unused imports in SpecPage
1 parent a7e5775 commit 1c46128

File tree

12 files changed

+53
-667
lines changed

12 files changed

+53
-667
lines changed

api/routers/proxy.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from fastapi import APIRouter, HTTPException
55
from fastapi.responses import HTMLResponse
66

7+
78
router = APIRouter(tags=["proxy"])
89

910
# Script injected to report content size to parent window
@@ -75,20 +76,17 @@ async def proxy_html(url: str):
7576
"""
7677
# Security: Only allow URLs from our GCS bucket
7778
if not url.startswith(f"https://{ALLOWED_HOST}/{ALLOWED_BUCKET}/"):
78-
raise HTTPException(
79-
status_code=400,
80-
detail=f"Only URLs from {ALLOWED_HOST}/{ALLOWED_BUCKET} are allowed",
81-
)
79+
raise HTTPException(status_code=400, detail=f"Only URLs from {ALLOWED_HOST}/{ALLOWED_BUCKET} are allowed")
8280

8381
# Fetch the HTML
8482
async with httpx.AsyncClient(timeout=30.0) as client:
8583
try:
8684
response = await client.get(url)
8785
response.raise_for_status()
8886
except httpx.HTTPStatusError as e:
89-
raise HTTPException(status_code=e.response.status_code, detail="Failed to fetch HTML")
90-
except httpx.RequestError:
91-
raise HTTPException(status_code=502, detail="Failed to connect to storage")
87+
raise HTTPException(status_code=e.response.status_code, detail="Failed to fetch HTML") from e
88+
except httpx.RequestError as e:
89+
raise HTTPException(status_code=502, detail="Failed to connect to storage") from e
9290

9391
html_content = response.text
9492

api/routers/seo.py

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88

99
from api.cache import cache_key, get_cache, set_cache
1010
from api.dependencies import optional_db
11-
from core.constants import LIBRARIES_METADATA
1211
from core.database import SpecRepository
1312

1413

@@ -20,7 +19,7 @@ async def get_sitemap(db: AsyncSession | None = Depends(optional_db)):
2019
"""
2120
Generate dynamic XML sitemap for SEO.
2221
23-
Includes all specs with implementations and all libraries.
22+
Includes root, catalog page, and all specs with implementations.
2423
"""
2524
key = cache_key("sitemap_xml")
2625
cached = get_cache(key)
@@ -32,6 +31,7 @@ async def get_sitemap(db: AsyncSession | None = Depends(optional_db)):
3231
'<?xml version="1.0" encoding="UTF-8"?>',
3332
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">',
3433
" <url><loc>https://pyplots.ai/</loc></url>",
34+
" <url><loc>https://pyplots.ai/catalog</loc></url>",
3535
]
3636

3737
# Add spec URLs (only specs with implementations)
@@ -41,12 +41,7 @@ async def get_sitemap(db: AsyncSession | None = Depends(optional_db)):
4141
for spec in specs:
4242
if spec.impls: # Only include specs with implementations
4343
spec_id = html.escape(spec.id)
44-
xml_lines.append(f" <url><loc>https://pyplots.ai/?spec={spec_id}</loc></url>")
45-
46-
# Add library URLs (static list)
47-
for lib in LIBRARIES_METADATA:
48-
lib_id = html.escape(lib["id"])
49-
xml_lines.append(f" <url><loc>https://pyplots.ai/?lib={lib_id}</loc></url>")
44+
xml_lines.append(f" <url><loc>https://pyplots.ai/{spec_id}</loc></url>")
5045

5146
xml_lines.append("</urlset>")
5247
xml = "\n".join(xml_lines)

app/src/App.tsx

Lines changed: 0 additions & 252 deletions
This file was deleted.

0 commit comments

Comments (0)