You can close this tab and return to your AI agent.
+ Admin Panel
+ """,
+ )
+
+ # Start chain: redirect to first gateway, store remaining in cookie
+ first_gw_id = pending_gateway_ids[0]
+ remaining = pending_gateway_ids[1:]
+
+ response = RedirectResponse(url=f"{root_path}/oauth/authorize/{first_gw_id}", status_code=302)
+
+ if remaining:
+ use_secure = (settings.environment == "production") or settings.secure_cookies
+ response.set_cookie(
+ key="oauth_chain",
+ value=",".join(remaining),
+ max_age=600, # 10 minutes for the full chain
+ httponly=True,
+ secure=use_secure,
+ samesite=settings.cookie_samesite,
+ path=settings.app_root_path or "/",
+ )
+
+ return response
+
+
@oauth_router.get("/status/{gateway_id}")
async def get_oauth_status(
gateway_id: str,
diff --git a/mcpgateway/routers/sso.py b/mcpgateway/routers/sso.py
index f4d93566bb..81d00cad53 100644
--- a/mcpgateway/routers/sso.py
+++ b/mcpgateway/routers/sso.py
@@ -150,6 +150,27 @@ def _normalize_origin(scheme: str, host: str, port: int | None) -> str:
return f"{scheme}://{host}:{port}"
+def _is_safe_local_path(path: str) -> bool:
+    """Validate that a path is a safe local redirect target (no open redirect).
+
+    Args:
+        path: The path to validate.
+
+    Returns:
+        True if the path is a safe relative path starting with ``/``.
+    """
+    if not path or not isinstance(path, str):
+        return False
+    if not path.startswith("/"):
+        return False
+    # Reject protocol-relative ("//"), userinfo ("@"), backslash tricks, and ASCII
+    # tab/CR/LF — browsers strip those control chars when parsing URLs, so
+    # "/\t//evil.com" would otherwise collapse into a protocol-relative redirect.
+    if path.startswith("//") or "@" in path or "\\" in path or any(c in path for c in "\t\r\n"):
+        return False
+    parsed = urlparse(path)
+    if parsed.scheme or parsed.netloc:
+        return False
+    return True
+
+
def _validate_redirect_uri(redirect_uri: str, request: Request | None = None) -> bool:
"""Validate redirect_uri to prevent open redirect attacks.
@@ -396,8 +417,24 @@ async def handle_sso_callback(
if not access_token:
return RedirectResponse(url=f"{root_path}/admin/login?error=user_creation_failed", status_code=302)
- # Create redirect response
- redirect_response = RedirectResponse(url=f"{root_path}/admin", status_code=302)
+ # Create redirect response — check for post-login destination cookie
+ post_login_next = request.cookies.get("post_login_next") if request else None
+ if post_login_next and _is_safe_local_path(post_login_next):
+ redirect_url = f"{root_path}{post_login_next}"
+ else:
+ redirect_url = f"{root_path}/admin"
+ redirect_response = RedirectResponse(url=redirect_url, status_code=302)
+
+ # Clear the post_login_next cookie regardless
+ if post_login_next:
+ use_secure = (settings.environment == "production") or settings.secure_cookies
+ redirect_response.delete_cookie(
+ "post_login_next",
+ path=settings.app_root_path or "/",
+ secure=use_secure,
+ httponly=True,
+ samesite=settings.cookie_samesite,
+ )
# Set secure HTTP-only cookie using the same method as email auth
# First-Party
diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py
index 821463b26a..6571c2bbd5 100644
--- a/mcpgateway/schemas.py
+++ b/mcpgateway/schemas.py
@@ -379,14 +379,6 @@ class MetricsResponse(BaseModelWithConfigDict):
@model_serializer(mode="wrap")
def _exclude_none_a2a(self, handler):
- """Omit the A2A metrics field when that feature is disabled.
-
- Args:
- handler: Pydantic serializer callback for the wrapped model.
-
- Returns:
- Dict[str, Any]: Serialized metrics payload without empty A2A fields.
- """
result = handler(self)
if self.a2a_agents is None:
result.pop("a2aAgents", None)
@@ -4071,6 +4063,19 @@ def validate_id(cls, v: Optional[str]) -> Optional[str]:
oauth_enabled: bool = Field(False, description="Enable OAuth 2.0 for MCP client authentication")
oauth_config: Optional[Dict[str, Any]] = Field(None, description="OAuth 2.0 configuration (authorization_server, scopes_supported, etc.)")
+ # Meta-server configuration
+ server_type: str = Field("standard", description="Server type: 'standard' or 'meta'. Meta servers expose meta-tools instead of real tools.")
+ hide_underlying_tools: bool = Field(True, description="When True and server_type is 'meta', underlying tools are hidden from tool listing endpoints")
+ meta_config: Optional[Dict[str, Any]] = Field(None, description="Meta-server configuration (MetaConfig schema). Only applicable when server_type is 'meta'.")
+ meta_scope: Optional[Dict[str, Any]] = Field(None, description="Scope rules for filtering tools visible to the meta-server (MetaToolScope schema).")
+
+ @field_validator("server_type")
+ @classmethod
+ def validate_server_type(cls, v: str) -> str:
+ if v not in ("standard", "meta"):
+ raise ValueError("server_type must be one of: standard, meta")
+ return v
+
@field_validator("name")
@classmethod
def validate_name(cls, v: str) -> str:
@@ -4232,6 +4237,19 @@ def validate_id(cls, v: Optional[str]) -> Optional[str]:
associated_prompts: Optional[List[str]] = Field(None, description="Comma-separated prompt IDs")
associated_a2a_agents: Optional[List[str]] = Field(None, description="Comma-separated A2A agent IDs")
+ # Meta-server configuration (optional update fields)
+ server_type: Optional[str] = Field(None, description="Server type: 'standard' or 'meta'")
+ hide_underlying_tools: Optional[bool] = Field(None, description="When True and server_type is 'meta', underlying tools are hidden")
+ meta_config: Optional[Dict[str, Any]] = Field(None, description="Meta-server configuration (MetaConfig schema)")
+ meta_scope: Optional[Dict[str, Any]] = Field(None, description="Scope rules for filtering tools visible to the meta-server")
+
+ @field_validator("server_type")
+ @classmethod
+ def validate_server_type(cls, v: Optional[str]) -> Optional[str]:
+ if v is not None and v not in ("standard", "meta"):
+ raise ValueError("server_type must be one of: standard, meta")
+ return v
+
@field_validator("name")
@classmethod
def validate_name(cls, v: str) -> str:
@@ -4364,6 +4382,12 @@ class ServerRead(BaseModelWithConfigDict):
oauth_enabled: bool = Field(False, description="Whether OAuth 2.0 is enabled for MCP client authentication")
oauth_config: Optional[Dict[str, Any]] = Field(None, description="OAuth 2.0 configuration (authorization_server, scopes_supported, etc.)")
+ # Meta-server configuration
+ server_type: str = Field("standard", description="Server type: 'standard' or 'meta'")
+ hide_underlying_tools: bool = Field(True, description="When True and server_type is 'meta', underlying tools are hidden")
+ meta_config: Optional[Dict[str, Any]] = Field(None, description="Meta-server configuration (MetaConfig schema)")
+ meta_scope: Optional[Dict[str, Any]] = Field(None, description="Scope rules for filtering tools visible to the meta-server")
+
_normalize_visibility = field_validator("visibility", mode="before")(classmethod(lambda cls, v: _coerce_visibility(v)))
@model_validator(mode="before")
@@ -8212,8 +8236,6 @@ class PerformanceHistoryResponse(BaseModel):
# ---------------------------------------------------------------------------
-# Tool Plugin Binding Schemas
-# ---------------------------------------------------------------------------
class PluginBindingMode(str, Enum):
@@ -8349,3 +8371,22 @@ class ToolPluginBindingListResponse(BaseModelWithConfigDict):
bindings: List[ToolPluginBindingResponse] = Field(default_factory=list, description="List of tool plugin bindings")
total: int = Field(0, description="Total number of bindings returned")
+
+
+class ToolSearchResult(BaseModelWithConfigDict):
+ """Response schema for a single tool search result with relevance score."""
+
+ tool_name: str = Field(..., description="Tool name")
+ description: Optional[str] = Field(None, description="Tool description")
+ similarity_score: float = Field(..., ge=0.0, le=1.0, description="Similarity score (0-1)")
+ server_id: Optional[str] = Field(None, description="Server ID the tool belongs to")
+ server_name: Optional[str] = Field(None, description="Server name the tool belongs to")
+ tags: List[str] = Field(default_factory=list, description="Tool tags")
+
+
+class SemanticSearchResponse(BaseModelWithConfigDict):
+ """Response schema for semantic tool search."""
+
+ results: List[ToolSearchResult] = Field(..., description="Ranked list of matching tools")
+ query: str = Field(..., description="Original search query")
+ total_results: int = Field(0, description="Number of results returned")
diff --git a/mcpgateway/services/embedding_service.py b/mcpgateway/services/embedding_service.py
new file mode 100644
index 0000000000..3f0c2d71ef
--- /dev/null
+++ b/mcpgateway/services/embedding_service.py
@@ -0,0 +1,9 @@
+"""Embedding service stub.
+
+Full implementation requires an embedding model (e.g. OpenAI text-embedding-3-small)
+and pgvector. This stub is a no-op so the application starts without those dependencies.
+"""
+
+
+async def index_tool_fire_and_forget(tool_id: str) -> None:
+ """Index a tool's embedding in the background. No-op stub."""
diff --git a/mcpgateway/services/meta_tool_service.py b/mcpgateway/services/meta_tool_service.py
new file mode 100644
index 0000000000..03f8cb2cac
--- /dev/null
+++ b/mcpgateway/services/meta_tool_service.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+"""Location: ./mcpgateway/services/meta_tool_service.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+
+Meta-Tool Service Implementation.
+This module implements the business logic for meta-tools (describe_tool, execute_tool).
+"""
+
+# Standard
+import time
+from typing import Any, Dict, List, Optional
+import uuid
+
+# Third-Party
+import jsonschema
+import orjson
+from sqlalchemy import select
+from sqlalchemy.orm import joinedload, Session
+
+# First-Party
+from mcpgateway.db import Server as DbServer
+from mcpgateway.db import Tool as DbTool
+from mcpgateway.db import ToolMetric
+from mcpgateway.meta_server.schemas import (
+ DescribeToolResponse,
+ ExecuteToolResponse,
+)
+from mcpgateway.services.logging_service import LoggingService
+from mcpgateway.services.tool_service import ToolService
+
+# Initialize logging
+logging_service = LoggingService()
+logger = logging_service.get_logger(__name__)
+
+
+class MetaToolService:
+ """Service for meta-tool operations."""
+
+ def __init__(self, db: Session):
+ """Initialize the MetaToolService.
+
+ Args:
+ db: Database session
+ """
+ self.db = db
+ self.tool_service = ToolService()
+
+    async def describe_tool(
+        self,
+        tool_name: str,
+        include_metrics: bool = False,
+        user_email: Optional[str] = None,
+        token_teams: Optional[List[str]] = None,
+        is_admin: bool = False,
+        scope: Optional[str] = None,
+    ) -> DescribeToolResponse:
+        """Get detailed information about a specific tool.
+
+        This implements the describe_tool meta-tool functionality with:
+        - Tool resolution by name
+        - Schema and metadata fetching
+        - Optional metrics fetching
+        - Scope verification
+
+        Args:
+            tool_name: Name of the tool to describe
+            include_metrics: Whether to include execution metrics
+            user_email: Email of requesting user
+            token_teams: Team IDs from JWT token
+            is_admin: Whether user is an admin
+            scope: Optional scope filter
+
+        Returns:
+            DescribeToolResponse with tool details
+
+        Raises:
+            ValueError: If tool not found or access denied
+        """
+        # Resolve tool by name with scope verification
+        tool = await self._resolve_tool(tool_name, user_email, token_teams, is_admin, scope)
+
+        if not tool:
+            raise ValueError(f"Tool not found: {tool_name}")
+
+        # Fetch server information
+        server_id = None
+        server_name = None
+        if tool.servers:
+            # Get first server (tools can be associated with multiple servers)
+            server = tool.servers[0]
+            server_id = server.id
+            server_name = server.name
+
+        # Fetch metrics if requested
+        metrics = None
+        if include_metrics:
+            metrics = await self._fetch_tool_metrics(tool.id)
+
+        # Extract tag strings from database format; drop entries missing both keys
+        # Tags may be stored as [{'id': 'tag', 'label': 'tag'}, ...] or ['tag', ...]
+        tags_list = tool.tags or []
+        if tags_list and isinstance(tags_list[0], dict):
+            tags_list = [t for t in (tag.get("id") or tag.get("label") for tag in tags_list if isinstance(tag, dict)) if t]
+
+        # Build response
+        response = DescribeToolResponse(
+            name=tool.name,
+            description=tool.description or tool.original_description,
+            input_schema=tool.input_schema,
+            output_schema=tool.output_schema,
+            server_id=server_id,
+            server_name=server_name,
+            tags=tags_list,
+            metrics=metrics,
+            annotations=tool.annotations,
+        )
+
+        return response
+
+    async def execute_tool(
+        self,
+        tool_name: str,
+        arguments: Dict[str, Any],
+        user_email: Optional[str] = None,
+        token_teams: Optional[List[str]] = None,
+        is_admin: bool = False,
+        scope: Optional[str] = None,
+        request_headers: Optional[Dict[str, str]] = None,
+    ) -> ExecuteToolResponse:
+        """Execute a tool with argument validation and routing.
+
+        This implements the execute_tool meta-tool functionality with:
+        - Tool resolution
+        - Argument validation against JSON schema
+        - Routing to backend server
+        - Safe header forwarding
+        - Execution metadata
+
+        Args:
+            tool_name: Name of the tool to execute
+            arguments: Arguments to pass to the tool
+            user_email: Email of requesting user
+            token_teams: Team IDs from JWT token
+            is_admin: Whether user is an admin
+            scope: Optional scope filter
+            request_headers: Headers from the original request
+
+        Returns:
+            ExecuteToolResponse with execution result and metadata
+
+        Raises:
+            ValueError: If tool not found or argument validation fails
+                (access-denied tools surface as "Tool not found"; execution
+                failures are returned in the response, not raised)
+        """
+        start_time = time.time()
+
+        # Resolve tool with scope verification
+        tool = await self._resolve_tool(tool_name, user_email, token_teams, is_admin, scope)
+
+        if not tool:
+            raise ValueError(f"Tool not found: {tool_name}")
+
+        # Validate arguments against input schema
+        if tool.input_schema:
+            try:
+                jsonschema.validate(instance=arguments, schema=tool.input_schema)
+            except jsonschema.ValidationError as e:
+                raise ValueError(f"Argument validation failed: {e.message}") from e
+
+        # Execute tool via ToolService
+        try:
+            # Generate request ID for tracking
+            request_id = str(uuid.uuid4())
+
+            # Prepare metadata
+            meta_data = {
+                "request_id": request_id,
+                "meta_tool": "execute_tool",
+            }
+
+            # Forward request to ToolService for execution
+            tool_result = await self.tool_service.invoke_tool(
+                db=self.db,
+                name=tool_name,
+                arguments=arguments,
+                request_headers=request_headers,
+                app_user_email=user_email,
+                user_email=user_email,
+                token_teams=token_teams,
+                meta_data=meta_data,
+            )
+
+            # Extract result content
+            result_data = None
+            if tool_result.content:
+                if isinstance(tool_result.content, list) and len(tool_result.content) > 0:
+                    first_content = tool_result.content[0]
+                    if hasattr(first_content, "text"):
+                        result_data = first_content.text
+                    elif hasattr(first_content, "model_dump"):
+                        result_data = orjson.dumps(first_content.model_dump(by_alias=True, mode="json")).decode()
+                    else:
+                        result_data = str(first_content)
+                else:
+                    result_data = str(tool_result.content)
+
+            execution_time_ms = int((time.time() - start_time) * 1000)
+
+            return ExecuteToolResponse(
+                tool_name=tool_name,
+                success=not getattr(tool_result, "isError", getattr(tool_result, "is_error", False)),
+                result=result_data,
+                error=None,
+                execution_time_ms=execution_time_ms,
+            )
+
+        except Exception as e:
+            execution_time_ms = int((time.time() - start_time) * 1000)
+            logger.error(f"Tool execution failed for {tool_name}: {e}")
+            return ExecuteToolResponse(
+                tool_name=tool_name,
+                success=False,
+                result=None,
+                error=str(e),
+                execution_time_ms=execution_time_ms,
+            )
+
+    async def _resolve_tool(
+        self,
+        tool_name: str,
+        user_email: Optional[str],
+        token_teams: Optional[List[str]],
+        is_admin: bool,
+        scope: Optional[str],
+    ) -> Optional[DbTool]:
+        """Resolve a tool by name with scope verification.
+
+        Args:
+            tool_name: Name of the tool
+            user_email: Email of requesting user
+            token_teams: Team IDs from JWT token
+            is_admin: Whether user is an admin
+            scope: Optional scope filter
+
+        Returns:
+            Tool object or None if not found/accessible
+        """
+        # Build query with eager loading of relationships
+        query = select(DbTool).options(joinedload(DbTool.servers)).where(DbTool.name == tool_name, DbTool.enabled == True)  # noqa: E712 — SQLAlchemy requires == comparison
+
+        # Apply scope filtering if provided
+        # Scope filtering logic:
+        # - If scope is provided, filter by visibility or team
+        # - Admin bypass if is_admin=True
+        if scope and not is_admin:
+            # Scope can be: public, team:, private
+            if scope == "public":
+                query = query.where(DbTool.visibility == "public")
+            elif scope.startswith("team:"):
+                team_id = scope.removeprefix("team:")
+                query = query.where(DbTool.team_id == team_id)
+            elif scope == "private":
+                query = query.where(DbTool.owner_email == user_email)
+
+        # Apply team-based filtering if not admin
+        if not is_admin and token_teams is not None:
+            # If token_teams is empty list, only public tools
+            # If token_teams has values, include team tools + public tools
+            if len(token_teams) == 0:
+                query = query.where(DbTool.visibility == "public")
+            else:
+                # Third-Party
+                from sqlalchemy import or_
+
+                query = query.where(or_(DbTool.visibility == "public", DbTool.team_id.in_(token_teams)))
+
+        result = self.db.execute(query)
+        tool = result.scalars().first()
+
+        return tool
+
+    async def _fetch_tool_metrics(self, tool_id: str) -> Optional[Dict[str, Any]]:
+        """Fetch execution metrics for a tool.
+
+        Args:
+            tool_id: Tool ID
+
+        Returns:
+            Dictionary with metrics or None
+        """
+        try:
+            # Query ToolMetric for aggregated metrics
+            query = select(ToolMetric).where(ToolMetric.tool_id == tool_id)
+            result = self.db.execute(query)
+            metrics_records = result.scalars().all()
+
+            if not metrics_records:
+                return None
+
+            # Aggregate metrics
+            execution_count = len(metrics_records)
+            successful = sum(1 for m in metrics_records if m.success)
+            failed = execution_count - successful
+            timed = [m.response_time for m in metrics_records if m.response_time is not None]
+            avg_time = (sum(timed) / len(timed)) if timed else 0
+
+            return {
+                "execution_count": execution_count,
+                "successful_executions": successful,
+                "failed_executions": failed,
+                "success_rate": successful / execution_count if execution_count > 0 else 0,
+                "avg_response_time_ms": avg_time,
+            }
+        except Exception as e:
+            logger.warning(f"Failed to fetch metrics for tool {tool_id}: {e}")
+            return None
diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py
index 50b3e2448d..7fa5fa7e88 100644
--- a/mcpgateway/services/prompt_service.py
+++ b/mcpgateway/services/prompt_service.py
@@ -30,7 +30,6 @@
from mcp import ClientSession, types
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamablehttp_client
-from mcp.types import GetPromptRequest, GetPromptRequestParams
import orjson
from pydantic import ValidationError
from sqlalchemy import and_, delete, desc, not_, or_, select
@@ -39,7 +38,6 @@
# First-Party
from mcpgateway.common.models import Message, PromptResult, Role, TextContent
-from mcpgateway.common.validators import validate_meta_data as _validate_meta_data
from mcpgateway.config import settings
from mcpgateway.db import EmailTeam
from mcpgateway.db import EmailTeamMember as DbEmailTeamMember
@@ -143,55 +141,6 @@ def _get_registry_cache():
metrics_buffer = get_metrics_buffer_service()
-def _build_get_prompt_request(name: str, arguments: Optional[Dict[str, str]], meta_data: Dict[str, Any]) -> "types.ClientRequest":
- """Build a GetPrompt ClientRequest that carries _meta (CWE-20, CWE-284).
-
- Using ``by_alias=True`` ensures the Pydantic alias ``_meta`` is the only
- key written into the dict so the subsequent ``model_validate`` call
- resolves it correctly regardless of ``populate_by_name`` settings.
-
- ``send_request`` is used instead of ``session.get_prompt()`` because the
- MCP SDK helper does not expose a ``_meta`` parameter; this wrapper must be
- updated if the SDK later adds that capability.
-
- Args:
- name: The prompt name.
- arguments: Optional prompt arguments.
- meta_data: Validated metadata dict to inject as ``_meta``.
-
- Returns:
- A :class:`types.ClientRequest` ready to be passed to ``session.send_request``.
- """
- _gp_dict = GetPromptRequestParams(name=name, arguments=arguments).model_dump(by_alias=True)
- _gp_dict["_meta"] = meta_data
- return types.ClientRequest(GetPromptRequest(params=GetPromptRequestParams.model_validate(_gp_dict)))
-
-
-async def _get_prompt_with_meta(session: "ClientSession", name: str, arguments: Optional[Dict[str, str]], meta_data: Optional[Dict[str, Any]]) -> Any:
- """Dispatch a get_prompt call, injecting ``_meta`` when meta_data is provided.
-
- Eliminates the repeated ``if meta_data: send_request … else: get_prompt``
- pattern across every transport/pool branch in this module.
-
- Args:
- session: An active MCP :class:`ClientSession`.
- name: The prompt name.
- arguments: Optional prompt-rendering arguments.
- meta_data: Optional validated metadata dict. When ``None`` the standard
- SDK helper is used; when non-empty the low-level ``send_request``
- path is taken to carry ``_meta``.
-
- Returns:
- The raw MCP result object (caller extracts ``.messages``).
- """
- if meta_data:
- return await session.send_request(
- _build_get_prompt_request(name, arguments, meta_data),
- types.GetPromptResult,
- )
- return await session.get_prompt(name, arguments=arguments)
-
-
class PromptError(Exception):
"""Base class for prompt-related errors."""
@@ -365,13 +314,14 @@ def _should_fetch_gateway_prompt(prompt: DbPrompt) -> bool:
"""
return bool(getattr(prompt, "gateway_id", None)) and not bool(getattr(prompt, "template", ""))
- async def _fetch_gateway_prompt_result(self, prompt: DbPrompt, arguments: Optional[Dict[str, str]], meta_data: Optional[Dict[str, Any]] = None) -> PromptResult:
+ async def _fetch_gateway_prompt_result(self, prompt: DbPrompt, arguments: Optional[Dict[str, str]], meta_data: Optional[Dict[str, Any]] = None, user_identity: Optional[str] = None) -> PromptResult:
"""Fetch a rendered prompt from the upstream MCP gateway.
Args:
prompt: Gateway-backed prompt record from the catalog.
arguments: Optional prompt-rendering arguments.
meta_data: Optional metadata dict forwarded as ``_meta`` in the upstream MCP request.
+ user_identity: Effective requester email for session-pool isolation.
Returns:
Prompt result normalized into ContextForge models.
@@ -403,8 +353,6 @@ async def _fetch_gateway_prompt_result(self, prompt: DbPrompt, arguments: Option
transport = str(getattr(gateway, "transport", "streamable_http") or "streamable_http").lower()
registry_transport_type = TransportType.SSE if transport == "sse" else TransportType.STREAMABLE_HTTP
prompt_arguments = arguments or None
- # CWE-400: Validate meta_data limits before forwarding to upstream
- _validate_meta_data(meta_data)
try:
# #4205: Use the upstream session registry when a downstream Mcp-Session-Id
@@ -423,6 +371,7 @@ async def _fetch_gateway_prompt_result(self, prompt: DbPrompt, arguments: Option
url=gateway_url,
headers=headers,
transport_type=registry_transport_type,
+ user_identity=pool_user_identity,
) as upstream:
remote_result = await _get_prompt_with_meta(upstream.session, remote_name, prompt_arguments, meta_data)
return PromptResult(
@@ -437,12 +386,12 @@ async def _fetch_gateway_prompt_result(self, prompt: DbPrompt, arguments: Option
async with sse_client(url=gateway_url, headers=headers, timeout=settings.health_check_timeout) as streams:
async with ClientSession(*streams) as session:
await session.initialize()
- remote_result = await _get_prompt_with_meta(session, remote_name, prompt_arguments, meta_data)
+ remote_result = await session.get_prompt(remote_name, arguments=prompt_arguments)
else:
async with streamablehttp_client(url=gateway_url, headers=headers, timeout=settings.health_check_timeout) as (read_stream, write_stream, _get_session_id):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
- remote_result = await _get_prompt_with_meta(session, remote_name, prompt_arguments, meta_data)
+ remote_result = await session.get_prompt(remote_name, arguments=prompt_arguments)
return PromptResult(
messages=[
@@ -1906,7 +1855,7 @@ async def get_prompt(
None = unrestricted admin, [] = public-only, [...] = team-scoped.
plugin_context_table: Optional plugin context table from previous hooks for cross-hook state sharing.
plugin_global_context: Optional global context from middleware for consistency across hooks.
- _meta_data: Optional metadata forwarded as _meta to the upstream MCP gateway during prompt retrieval.
+ _meta_data: Optional metadata for prompt retrieval (not used currently).
Returns:
Prompt result with rendered messages
@@ -2067,7 +2016,7 @@ async def get_prompt(
if self._should_fetch_gateway_prompt(prompt):
# Release the read transaction before any remote network I/O.
db.commit()
- result = await self._fetch_gateway_prompt_result(prompt, arguments, meta_data=_meta_data)
+ result = await self._fetch_gateway_prompt_result(prompt, arguments, meta_data=_meta_data, user_identity=user)
elif not arguments:
result = PromptResult(
messages=[
diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py
index c2e4bcf742..4ea233ba91 100644
--- a/mcpgateway/services/resource_service.py
+++ b/mcpgateway/services/resource_service.py
@@ -35,10 +35,9 @@
# Third-Party
import httpx
-from mcp import ClientSession, types
+from mcp import ClientSession
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamablehttp_client
-from mcp.types import ReadResourceRequest, ReadResourceRequestParams
import parse
from pydantic import ValidationError
from sqlalchemy import and_, delete, desc, not_, or_, select
@@ -48,7 +47,6 @@
# First-Party
from mcpgateway.common.models import ResourceContent, ResourceContents, ResourceTemplate, TextContent
from mcpgateway.common.validators import SecurityValidator
-from mcpgateway.common.validators import validate_meta_data as _validate_meta_data
from mcpgateway.config import settings
from mcpgateway.db import EmailTeam
from mcpgateway.db import EmailTeamMember as DbEmailTeamMember
@@ -116,53 +114,6 @@ def _get_registry_cache():
metrics_buffer = get_metrics_buffer_service()
-def _build_read_resource_request(uri: Any, meta_data: Dict[str, Any]) -> "types.ClientRequest":
- """Build a ReadResource ClientRequest that carries _meta (CWE-20, CWE-284).
-
- Using ``by_alias=True`` ensures the Pydantic alias ``_meta`` is the only
- key written into the dict so the subsequent ``model_validate`` call
- resolves it correctly regardless of ``populate_by_name`` settings.
-
- ``send_request`` is used instead of ``session.read_resource()`` because the
- MCP SDK helper does not expose a ``_meta`` parameter; this wrapper must be
- updated if the SDK later adds that capability.
-
- Args:
- uri: The resource URI.
- meta_data: Validated metadata dict to inject as ``_meta``.
-
- Returns:
- A :class:`types.ClientRequest` ready to be passed to ``session.send_request``.
- """
- _rp_dict = ReadResourceRequestParams(uri=uri).model_dump(by_alias=True)
- _rp_dict["_meta"] = meta_data
- return types.ClientRequest(ReadResourceRequest(params=ReadResourceRequestParams.model_validate(_rp_dict)))
-
-
-async def _read_resource_with_meta(session: "ClientSession", uri: Any, meta_data: Optional[Dict[str, Any]]) -> Any:
- """Dispatch a read_resource call, injecting ``_meta`` when meta_data is provided.
-
- Eliminates the repeated ``if meta_data: send_request … else: read_resource``
- pattern across every transport/pool branch in this module.
-
- Args:
- session: An active MCP :class:`ClientSession`.
- uri: The resource URI to read.
- meta_data: Optional validated metadata dict. When ``None`` the standard
- SDK helper is used; when non-empty the low-level ``send_request``
- path is taken to carry ``_meta``.
-
- Returns:
- The raw MCP result object (caller extracts ``.contents``).
- """
- if meta_data:
- return await session.send_request(
- _build_read_resource_request(uri, meta_data),
- types.ReadResourceResult,
- )
- return await session.read_resource(uri=uri)
-
-
class ResourceError(Exception):
"""Base class for resource-related errors."""
@@ -1622,7 +1573,7 @@ async def invoke_resource( # pylint: disable=unused-argument
resource_uri: str,
resource_template_uri: Optional[str] = None,
user_identity: Optional[Union[str, Dict[str, Any]]] = None,
- meta_data: Optional[Dict[str, Any]] = None, # Forwarded as _meta in upstream MCP requests
+ meta_data: Optional[Dict[str, Any]] = None, # Reserved for future MCP SDK support
resource_obj: Optional[Any] = None,
gateway_obj: Optional[Any] = None,
server_id: Optional[str] = None,
@@ -1736,10 +1687,6 @@ async def invoke_resource( # pylint: disable=unused-argument
'using template: /template'
"""
- # CWE-400: Validate meta_data limits before any further processing; invoke_resource is
- # a separate entry point that must enforce the same guards as read_resource.
- _validate_meta_data(meta_data)
-
uri = None
if resource_uri and resource_template_uri:
uri = resource_template_uri
@@ -1976,8 +1923,8 @@ async def connect_to_sse_session(server_url: str, uri: str, authentication: Opti
``None`` instead of raising.
Note:
- When meta_data is provided, the request is built using send_request
- with _meta injected into ReadResourceRequestParams.
+ MCP SDK 1.25.0 read_resource() does not support meta parameter.
+ When the SDK adds support, meta_data can be added back here.
Args:
server_url (str):
@@ -2024,6 +1971,7 @@ async def connect_to_sse_session(server_url: str, uri: str, authentication: Opti
headers=authentication,
transport_type=TransportType.SSE,
httpx_client_factory=_get_httpx_client_factory,
+ user_identity=pool_user_identity,
) as upstream:
resource_response = await _read_resource_with_meta(upstream.session, uri, meta_data)
return getattr(getattr(resource_response, "contents")[0], "text")
@@ -2035,7 +1983,8 @@ async def connect_to_sse_session(server_url: str, uri: str, authentication: Opti
):
async with ClientSession(read_stream, write_stream) as session:
_ = await session.initialize()
- resource_response = await _read_resource_with_meta(session, uri, meta_data)
+ # Note: MCP SDK 1.25.0 read_resource() does not support meta parameter
+ resource_response = await session.read_resource(uri=uri)
return getattr(getattr(resource_response, "contents")[0], "text")
except Exception as e:
# Sanitize error message to prevent URL secrets from leaking in logs
@@ -2057,8 +2006,8 @@ async def connect_to_streamablehttp_server(server_url: str, uri: str, authentica
of propagating the exception.
Note:
- When meta_data is provided, the request is built using send_request
- with _meta injected into ReadResourceRequestParams.
+ MCP SDK 1.25.0 read_resource() does not support meta parameter.
+ When the SDK adds support, meta_data can be added back here.
Args:
server_url (str):
@@ -2102,6 +2051,7 @@ async def connect_to_streamablehttp_server(server_url: str, uri: str, authentica
headers=authentication,
transport_type=TransportType.STREAMABLE_HTTP,
httpx_client_factory=_get_httpx_client_factory,
+ user_identity=pool_user_identity,
) as upstream:
resource_response = await _read_resource_with_meta(upstream.session, uri, meta_data)
return getattr(getattr(resource_response, "contents")[0], "text")
@@ -2114,7 +2064,8 @@ async def connect_to_streamablehttp_server(server_url: str, uri: str, authentica
):
async with ClientSession(read_stream, write_stream) as session:
_ = await session.initialize()
- resource_response = await _read_resource_with_meta(session, uri, meta_data)
+ # Note: MCP SDK 1.25.0 read_resource() does not support meta parameter
+ resource_response = await session.read_resource(uri=uri)
return getattr(getattr(resource_response, "contents")[0], "text")
except Exception as e:
# Sanitize error message to prevent URL secrets from leaking in logs
@@ -2128,8 +2079,10 @@ async def connect_to_streamablehttp_server(server_url: str, uri: str, authentica
resource_text = ""
if (gateway_transport).lower() == "sse":
+ # Note: meta_data not passed - MCP SDK 1.25.0 read_resource() doesn't support it
resource_text = await connect_to_sse_session(server_url=gateway_url, authentication=headers, uri=uri)
else:
+ # Note: meta_data not passed - MCP SDK 1.25.0 read_resource() doesn't support it
resource_text = await connect_to_streamablehttp_server(server_url=gateway_url, authentication=headers, uri=uri)
if span and resource_text is not None and is_output_capture_enabled("invoke.resource"):
set_span_attribute(span, "langfuse.observation.output", serialize_trace_payload({"content": resource_text}))
@@ -2241,8 +2194,6 @@ async def read_resource(
resource_db = None
server_scoped = False
resource_db_gateway = None # Only set when eager-loaded via Q2's joinedload
- # CWE-400: Validate meta_data limits before any further processing
- _validate_meta_data(meta_data)
content = None
uri = resource_uri or "unknown"
if resource_id:
@@ -2421,7 +2372,8 @@ async def read_resource(
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
- result = await _read_resource_with_meta(session, uri, meta_data)
+ # Note: MCP SDK read_resource() only accepts uri; _meta is not supported
+ result = await session.read_resource(uri=uri)
# Convert MCP result to MCP-compliant content models
# result.contents is a list of TextResourceContents or BlobResourceContents
diff --git a/mcpgateway/services/semantic_search_service.py b/mcpgateway/services/semantic_search_service.py
new file mode 100644
index 0000000000..851569e2d3
--- /dev/null
+++ b/mcpgateway/services/semantic_search_service.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+"""Semantic search service stub.
+
+Placeholder for the full semantic search implementation (issue #2229).
+Returns empty results, allowing fallback to keyword search.
+"""
+
+import logging
+from typing import Any, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class SemanticSearchService:
    """Stub semantic search service that always returns empty results.

    Placeholder until the full semantic-search implementation lands
    (issue #2229); callers treat an empty result as the signal to fall
    back to keyword search.
    """

    async def search_tools(
        self,
        query: str,
        db: Any = None,
        limit: int = 10,
        threshold: float = 0.7,
    ) -> List[Any]:
        """Return an empty list — semantic search is not implemented yet.

        Args:
            query: Free-text search query; only a 100-character prefix is logged.
            db: Unused placeholder for a database session.
            limit: Unused placeholder for the maximum result count.
            threshold: Unused placeholder for the similarity cutoff.

        Returns:
            Always an empty list.
        """
        query_preview = query[:100]
        logger.debug("Semantic search not available, returning empty results for query: %s", query_preview)
        return []
+
+
# Module-level singleton holder; populated lazily by the accessor below.
_instance: Optional[SemanticSearchService] = None


def get_semantic_search_service() -> SemanticSearchService:
    """Return the process-wide :class:`SemanticSearchService` singleton.

    The instance is created lazily on first call and reused afterwards.

    Returns:
        The shared SemanticSearchService instance.
    """
    global _instance
    # An existing instance is always truthy, so `or` only triggers creation once.
    _instance = _instance or SemanticSearchService()
    return _instance
diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py
index 311fa92e1d..92e5c6af52 100644
--- a/mcpgateway/services/server_service.py
+++ b/mcpgateway/services/server_service.py
@@ -431,6 +431,11 @@ def convert_server_to_read(self, server: DbServer, include_metrics: bool = False
# OAuth 2.0 configuration for RFC 9728 Protected Resource Metadata
"oauth_enabled": getattr(server, "oauth_enabled", False),
"oauth_config": getattr(server, "oauth_config", None),
+ # Meta-server fields
+ "server_type": getattr(server, "server_type", "standard") or "standard",
+ "hide_underlying_tools": getattr(server, "hide_underlying_tools", True),
+ "meta_config": getattr(server, "meta_config", None),
+ "meta_scope": getattr(server, "meta_scope", None),
}
# Compute aggregated metrics only if requested (avoids N+1 queries in list operations)
@@ -598,6 +603,11 @@ async def register_server(
# OAuth 2.0 configuration for RFC 9728 Protected Resource Metadata
oauth_enabled=getattr(server_in, "oauth_enabled", False) or False,
oauth_config=oauth_config,
+ # Meta-server fields
+ server_type=getattr(server_in, "server_type", "standard") or "standard",
+ hide_underlying_tools=getattr(server_in, "hide_underlying_tools", True),
+ meta_config=getattr(server_in, "meta_config", None),
+ meta_scope=getattr(server_in, "meta_scope", None),
# Metadata fields
created_by=created_by,
created_from_ip=created_from_ip,
@@ -1326,6 +1336,16 @@ async def update_server(
elif server_update.oauth_config is not None:
server.oauth_config = await protect_oauth_config_for_storage(server_update.oauth_config, existing_oauth_config=server.oauth_config)
+ # Update meta-server fields if provided
+ if getattr(server_update, "server_type", None) is not None:
+ server.server_type = server_update.server_type
+ if getattr(server_update, "hide_underlying_tools", None) is not None:
+ server.hide_underlying_tools = server_update.hide_underlying_tools
+ if getattr(server_update, "meta_config", None) is not None:
+ server.meta_config = server_update.meta_config
+ if getattr(server_update, "meta_scope", None) is not None:
+ server.meta_scope = server_update.meta_scope
+
# Update metadata fields
server.updated_at = datetime.now(timezone.utc)
if modified_by:
diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py
index 89a7369e8f..8a5651ad01 100644
--- a/mcpgateway/services/tool_service.py
+++ b/mcpgateway/services/tool_service.py
@@ -3928,9 +3928,10 @@ async def prepare_rust_mcp_tool_execution(
with fresh_db_session() as token_db:
token_storage = TokenStorageService(token_db)
- if not app_user_email:
+ effective_email = app_user_email or user_email
+ if not effective_email:
raise ToolInvocationError(f"User authentication required for OAuth-protected gateway '{gateway_name}'. Please ensure you are authenticated.")
- access_token = await token_storage.get_user_token(gateway_id_str, app_user_email)
+ access_token = await token_storage.get_user_token(gateway_id_str, effective_email)
if access_token:
headers = {"Authorization": f"Bearer {access_token}"}
@@ -5064,10 +5065,11 @@ async def invoke_tool(
token_storage = TokenStorageService(token_db)
# Get user-specific OAuth token
- if not app_user_email:
+ effective_email = app_user_email or user_email
+ if not effective_email:
raise ToolInvocationError(f"User authentication required for OAuth-protected gateway '{gateway_name}'. Please ensure you are authenticated.")
- access_token = await token_storage.get_user_token(gateway_id_str, app_user_email)
+ access_token = await token_storage.get_user_token(gateway_id_str, effective_email)
if access_token:
headers = {"Authorization": f"Bearer {access_token}"}
diff --git a/mcpgateway/services/vector_search_service.py b/mcpgateway/services/vector_search_service.py
new file mode 100644
index 0000000000..a4add0fb2c
--- /dev/null
+++ b/mcpgateway/services/vector_search_service.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""Vector search service stub.
+
+Placeholder for the full vector search implementation (issue #2229).
+Provides embedding retrieval and similarity search over tool embeddings.
+"""
+
+import logging
+import math
+from typing import Any, List, Optional
+
+from sqlalchemy.orm import Session
+
+logger = logging.getLogger(__name__)
+
+
+def _cosine_similarity_numpy(vec_a: List[float], vec_b: List[float]) -> float:
+ """Compute cosine similarity between two vectors without numpy."""
+ if not vec_a or not vec_b or len(vec_a) != len(vec_b):
+ return 0.0
+ dot = sum(a * b for a, b in zip(vec_a, vec_b))
+ norm_a = math.sqrt(sum(a * a for a in vec_a))
+ norm_b = math.sqrt(sum(b * b for b in vec_b))
+ if norm_a == 0 or norm_b == 0:
+ return 0.0
+ return dot / (norm_a * norm_b)
+
+
class VectorSearchService:
    """Stub vector search service for tool embeddings.

    Placeholder for the full vector search implementation (issue #2229):
    lookups degrade to ``None``/empty results whenever the embeddings
    model or a database session is unavailable.
    """

    def __init__(self, db: Optional[Session] = None):
        """Remember an optional default session used when none is passed per call."""
        self.db = db

    def get_tool_embedding(self, db: Session, tool_id: str) -> Any:
        """Fetch the stored embedding row for one tool.

        Args:
            db: Database session to query.
            tool_id: Identifier of the tool whose embedding is requested.

        Returns:
            The matching ToolEmbedding row, or None when nothing is stored
            or the lookup fails for any reason.
        """
        try:
            # Imported lazily so this module loads even when the embeddings
            # model is not available in the deployment.
            from mcpgateway.db import ToolEmbedding

            return db.query(ToolEmbedding).filter(ToolEmbedding.tool_id == tool_id).first()
        except Exception as e:
            logger.debug("Failed to get tool embedding for %s: %s", tool_id, e)
            return None

    async def search_similar_tools(
        self,
        embedding: List[float],
        limit: int = 10,
        db: Optional[Session] = None,
    ) -> List[Any]:
        """Rank all stored tool embeddings by cosine similarity to ``embedding``.

        Args:
            embedding: Query vector to compare against stored embeddings.
            limit: Maximum number of scored rows to return.
            db: Session override; falls back to the constructor-supplied one.

        Returns:
            Up to ``limit`` ``(ToolEmbedding, similarity)`` pairs sorted
            best-first, or an empty list when no session or no embeddings exist.
        """
        session = db or self.db
        if session is None:
            return []

        try:
            # Lazy import mirrors get_tool_embedding: keep the stub importable
            # without the embeddings model.
            from mcpgateway.db import ToolEmbedding

            rows = session.query(ToolEmbedding).all()
            if not rows:
                return []

            # NOTE(review): full-table scan plus in-Python scoring — acceptable
            # for a stub; the real implementation should push this into the DB.
            ranked = [(row, _cosine_similarity_numpy(embedding, row.embedding)) for row in rows]
            ranked.sort(key=lambda pair: pair[1], reverse=True)
            return ranked[:limit]
        except Exception as e:
            logger.debug("Vector similarity search failed: %s", e)
            return []
diff --git a/mcpgateway/templates/admin.html b/mcpgateway/templates/admin.html
index 08fdc66641..393e111983 100644
--- a/mcpgateway/templates/admin.html
+++ b/mcpgateway/templates/admin.html
@@ -3358,6 +3358,48 @@
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (e.g. search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the 6 meta-tools, not the individual backend tools.
+
+
+
+
+
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (e.g. search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the 6 meta-tools, not the individual backend tools.
+
+
+
+
+
diff --git a/mcpgateway/transports/streamablehttp_transport.py b/mcpgateway/transports/streamablehttp_transport.py
index dff7425b01..ffe5dd9868 100644
--- a/mcpgateway/transports/streamablehttp_transport.py
+++ b/mcpgateway/transports/streamablehttp_transport.py
@@ -64,9 +64,9 @@
# First-Party
from mcpgateway.cache.global_config_cache import global_config_cache
from mcpgateway.common.models import LogLevel
-from mcpgateway.common.validators import validate_meta_data as _validate_meta_data
from mcpgateway.config import settings
from mcpgateway.db import SessionLocal
+from mcpgateway.meta_server.service import get_meta_server_service
from mcpgateway.middleware.rbac import _ACCESS_DENIED_MSG
from mcpgateway.observability import create_span
from mcpgateway.plugins.framework.models import UserContext
@@ -290,6 +290,9 @@ def _resolve_authorization_servers(oauth_config: Dict[str, Any]) -> List[str]:
return [url]
return []
+# Meta-server context: stores server_type for the current request
+server_type_var: contextvars.ContextVar[str] = contextvars.ContextVar("server_type", default="standard")
+hide_underlying_tools_var: contextvars.ContextVar[bool] = contextvars.ContextVar("hide_underlying_tools", default=True)
_shared_session_registry: Optional[Any] = None
_rust_event_store_client: Optional[httpx.AsyncClient] = None
@@ -1295,8 +1298,14 @@ async def _proxy_list_tools_to_gateway(gateway: Any, request_headers: dict, user
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
+ # Prepare params with _meta if provided
+ params = None
+ if meta:
+ params = PaginatedRequestParams(_meta=meta)
+ logger.debug("Forwarding _meta to remote gateway: %s", meta)
+
# List tools with _meta forwarded
- result = await session.list_tools(params=_build_paginated_params(meta))
+ result = await session.list_tools(params=params)
return result.tools
except Exception as e:
@@ -1343,16 +1352,21 @@ async def _proxy_list_resources_to_gateway(gateway: Any, request_headers: dict,
logger.info("Proxying resources/list to gateway %s at %s", gateway.id, gateway.url)
if meta:
- # CWE-532: log only key names, never values which may carry PII/tokens
- logger.debug("Forwarding _meta to remote gateway (keys: %s)", sorted(meta.keys()) if isinstance(meta, dict) else type(meta).__name__)
+ logger.debug("Forwarding _meta to remote gateway: %s", meta)
# Use MCP SDK to connect and list resources
async with streamablehttp_client(url=gateway.url, headers=headers, timeout=settings.mcpgateway_direct_proxy_timeout) as (read_stream, write_stream, _get_session_id):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()
+ # Prepare params with _meta if provided
+ params = None
+ if meta:
+ params = PaginatedRequestParams(_meta=meta)
+ logger.debug("Forwarding _meta to remote gateway: %s", meta)
+
# List resources with _meta forwarded
- result = await session.list_resources(params=_build_paginated_params(meta))
+ result = await session.list_resources(params=params)
logger.info("Received %s resources from gateway %s", len(result.resources), gateway.id)
return result.resources
@@ -1409,8 +1423,7 @@ async def _proxy_read_resource_to_gateway(gateway: Any, resource_uri: str, user_
logger.info("Proxying resources/read for %s to gateway %s at %s", resource_uri, gateway.id, gateway.url)
if meta:
- # CWE-532: log only key names, never values which may carry PII/tokens
- logger.debug("Forwarding _meta to remote gateway (keys: %s)", sorted(meta.keys()) if isinstance(meta, dict) else type(meta).__name__)
+ logger.debug("Forwarding _meta to remote gateway: %s", meta)
# Use MCP SDK to connect and read resource
async with streamablehttp_client(url=gateway.url, headers=headers, timeout=settings.mcpgateway_direct_proxy_timeout) as (read_stream, write_stream, _get_session_id):
@@ -1420,10 +1433,8 @@ async def _proxy_read_resource_to_gateway(gateway: Any, resource_uri: str, user_
# Prepare request params with _meta if provided
if meta:
# Create params and inject _meta
- # by_alias=True ensures the alias "_meta" key is written so
- # model_validate resolves it correctly (fixes CWE-20 silent drop)
request_params = ReadResourceRequestParams(uri=resource_uri)
- request_params_dict = request_params.model_dump(by_alias=True)
+ request_params_dict = request_params.model_dump()
request_params_dict["_meta"] = meta
# Send request with _meta
@@ -1535,6 +1546,9 @@ async def call_tool(name: str, arguments: dict) -> Union[
token_teams = user_context.get("teams") if user_context else None
is_admin = user_context.get("is_admin", False) if user_context else False
+ # Preserve actual email for OAuth token lookup before admin bypass nulls it
+ actual_user_email = user_email
+
# Admin bypass - only when token has NO team restrictions (token_teams is None)
# If token has explicit team scope (even empty [] for public-only), respect it
if is_admin and token_teams is None:
@@ -1567,6 +1581,20 @@ async def call_tool(name: str, arguments: dict) -> Union[
if not has_execute_permission:
raise PermissionError(_ACCESS_DENIED_MSG)
+ # Check if this is a meta-tool call on a meta-server
+ current_server_type = server_type_var.get()
+ meta_service = get_meta_server_service()
+ if meta_service.is_meta_server(current_server_type) and meta_service.is_meta_tool(name):
+ # Dispatch to meta-tool stub handler
+ # Use actual_user_email (not RBAC-filtered user_email) so OAuth token lookup works
+ result_data = await meta_service.handle_meta_tool_call(
+ name, arguments,
+ user_email=actual_user_email,
+ token_teams=token_teams,
+ request_headers=request_headers,
+ )
+ return [types.TextContent(type="text", text=orjson.dumps(result_data).decode())]
+
# Check if we're in direct_proxy mode by looking for X-Context-Forge-Gateway-Id header
gateway_id_from_header = extract_gateway_id_from_headers(request_headers)
@@ -2078,6 +2106,15 @@ async def list_tools() -> List[types.Tool]:
if not settings.mcp_require_auth:
await _check_server_oauth_enforcement(server_id, user_context)
+ # Check if this is a meta-server that should expose meta-tools instead
+ current_server_type = server_type_var.get()
+ current_hide_underlying = hide_underlying_tools_var.get()
+ meta_service = get_meta_server_service()
+ if meta_service.should_hide_underlying_tools(current_server_type, current_hide_underlying):
+ # Return meta-tools instead of underlying real tools
+ meta_tool_defs = meta_service.get_meta_tool_definitions()
+ return [types.Tool(name=td["name"], description=td["description"], inputSchema=td["inputSchema"]) for td in meta_tool_defs]
+
if server_id:
try:
async with get_db() as db:
@@ -2525,20 +2562,23 @@ async def read_resource(resource_uri: str) -> Union[str, bytes]:
return ""
# Direct proxy mode: forward request to remote MCP server
- # SECURITY: CWE-532 protection - Log only meta_data key names, NEVER values
- # Metadata may contain PII, authentication tokens, or sensitive context that
- # MUST NOT be written to logs. This is a critical security control.
- logger.debug(
- "Using direct_proxy mode for resources/read %s, server %s, gateway %s (from %s header), forwarding _meta keys: %s",
- resource_uri,
- server_id,
- gateway.id,
- GATEWAY_ID_HEADER,
- sorted(meta_data.keys()) if meta_data else None,
- )
- # CWE-400: validate _meta limits before network I/O (bypassed in direct-proxy branch)
- _validate_meta_data(meta_data)
- contents = await _proxy_read_resource_to_gateway(gateway, str(resource_uri), user_context, meta_data)
+ # Get _meta from request context if available
+ meta = None
+ try:
+ request_ctx = mcp_app.request_context
+ meta = request_ctx.meta
+ logger.info(
+ "Using direct_proxy mode for resources/read %s, server %s, gateway %s (from %s header), forwarding _meta: %s",
+ resource_uri,
+ server_id,
+ gateway.id,
+ GATEWAY_ID_HEADER,
+ meta,
+ )
+ except (LookupError, AttributeError) as e:
+ logger.debug("No request context available for _meta extraction: %s", e)
+
+ contents = await _proxy_read_resource_to_gateway(gateway, str(resource_uri), user_context, meta)
if contents:
# Return first content (text or blob)
first_content = contents[0]
@@ -4267,6 +4307,21 @@ async def handle_streamable_http( # noqa: PLR0911,PLR0912,PLR0915 — pylint: d
server_id_var.set(validated)
+ # Load server metadata for meta-server tool hiding
+ if validated:
+ try:
+ from mcpgateway.db import Server as DbServer # pylint: disable=import-outside-toplevel
+ db = SessionLocal()
+ try:
+ srv = db.query(DbServer).filter(DbServer.id == validated).first()
+ if srv:
+ server_type_var.set(getattr(srv, "server_type", "standard") or "standard")
+ hide_underlying_tools_var.set(getattr(srv, "hide_underlying_tools", True))
+ finally:
+ db.close()
+ except Exception as e:
+ logger.debug("Failed to load server metadata for meta-server: %s", e)
+
# For session affinity: wrap send to capture session ID from response headers
# This allows us to register ownership for new sessions created by the SDK
captured_session_id: Optional[str] = None
diff --git a/mcpgateway/utils/pgvector.py b/mcpgateway/utils/pgvector.py
new file mode 100644
index 0000000000..95bea652df
--- /dev/null
+++ b/mcpgateway/utils/pgvector.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+"""pgvector compatibility shim.
+
+Provides HAS_PGVECTOR flag and Vector type for optional pgvector support.
+When pgvector is not installed, falls back to JSON column storage.
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
# Probe for the optional pgvector SQLAlchemy integration at import time.
try:
    from pgvector.sqlalchemy import Vector  # type: ignore[import-untyped]

    # Callers check this flag to choose between a native VECTOR column
    # and the JSON fallback for storing embeddings.
    HAS_PGVECTOR = True
    logger.debug("pgvector extension available")
except ImportError:
    HAS_PGVECTOR = False
    # Keep the name importable either way; callers must gate on
    # HAS_PGVECTOR before using Vector.
    Vector = None  # type: ignore[assignment,misc]
    logger.debug("pgvector not available, using JSON fallback for embeddings")
diff --git a/tests/unit/mcpgateway/services/test_meta_tool_service.py b/tests/unit/mcpgateway/services/test_meta_tool_service.py
new file mode 100644
index 0000000000..4785490490
--- /dev/null
+++ b/tests/unit/mcpgateway/services/test_meta_tool_service.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/services/test_meta_tool_service.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+
+Unit tests for the Meta-Tool Service with mocked database layer.
+"""
+
+# Standard
+from unittest.mock import AsyncMock, MagicMock, patch
+import uuid
+
+# Third-Party
+import pytest
+
+# First-Party
+from mcpgateway.meta_server.schemas import DescribeToolResponse, ExecuteToolResponse
+from mcpgateway.services.meta_tool_service import MetaToolService
+
+
class TestDescribeTool:
    """Tests for describe_tool functionality."""

    # NOTE(review): the ``test_db`` fixture is not defined in this file —
    # presumably provided by a shared conftest; confirm before running in isolation.

    @pytest.mark.asyncio
    async def test_describe_tool_success(self, test_db):
        """Test successful tool description."""
        service = MetaToolService(test_db)

        # Create mock server
        mock_server = MagicMock()
        mock_server.id = "server-123"
        mock_server.name = "test-server"

        # Create mock tool
        mock_tool = MagicMock()
        mock_tool.id = str(uuid.uuid4())
        mock_tool.name = "test_tool"
        mock_tool.description = "Test tool description"
        mock_tool.input_schema = {"type": "object", "properties": {"arg1": {"type": "string"}}}
        mock_tool.output_schema = {"type": "object"}
        mock_tool.tags = ["test", "sample"]
        mock_tool.annotations = {"example": "data"}
        mock_tool.servers = [mock_server]

        # Mock the _resolve_tool method so no real tool rows are needed
        with patch.object(service, '_resolve_tool', new_callable=AsyncMock) as mock_resolve:
            mock_resolve.return_value = mock_tool

            response = await service.describe_tool(
                tool_name="test_tool",
                include_metrics=False,
                user_email="test@example.com",
                token_teams=[],
                is_admin=False,
                scope=None,
            )

            # Response must mirror the mocked tool/server attributes
            assert isinstance(response, DescribeToolResponse)
            assert response.name == "test_tool"
            assert response.description == "Test tool description"
            assert response.server_name == "test-server"
            assert "test" in response.tags

    @pytest.mark.asyncio
    async def test_describe_tool_not_found(self, test_db):
        """Test describe_tool with non-existent tool."""
        service = MetaToolService(test_db)

        # Resolving to None must surface as ValueError, not an empty response
        with patch.object(service, '_resolve_tool', new_callable=AsyncMock, return_value=None):
            with pytest.raises(ValueError, match="Tool not found"):
                await service.describe_tool(
                    tool_name="nonexistent_tool",
                    include_metrics=False,
                    user_email="test@example.com",
                    token_teams=[],
                    is_admin=False,
                    scope=None,
                )
+
+
class TestExecuteTool:
    """Tests for execute_tool functionality."""

    # NOTE(review): relies on the external ``test_db`` fixture and on
    # ``service.tool_service`` existing as an attribute — confirm against MetaToolService.

    @pytest.mark.asyncio
    async def test_execute_tool_validation_error_returns_400(self, test_db):
        """Test execute_tool returns validation error for invalid arguments."""
        service = MetaToolService(test_db)

        # Create mock tool with strict schema
        mock_tool = MagicMock()
        mock_tool.id = str(uuid.uuid4())
        mock_tool.name = "strict_tool"
        mock_tool.input_schema = {
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        }

        with patch.object(service, '_resolve_tool', new_callable=AsyncMock, return_value=mock_tool):
            # Missing required argument should raise ValueError
            with pytest.raises(ValueError, match="Argument validation failed"):
                await service.execute_tool(
                    tool_name="strict_tool",
                    arguments={},  # Missing 'name'
                    user_email="test@example.com",
                    token_teams=[],
                    is_admin=False,
                    scope=None,
                )

    @pytest.mark.asyncio
    async def test_execute_tool_backend_error_surfaces_cleanly(self, test_db):
        """Test backend errors are surfaced cleanly in response."""
        service = MetaToolService(test_db)

        # Create mock tool (empty schema: no argument validation in the way)
        mock_tool = MagicMock()
        mock_tool.id = str(uuid.uuid4())
        mock_tool.name = "failing_tool"
        mock_tool.input_schema = {}

        with patch.object(service, '_resolve_tool', new_callable=AsyncMock, return_value=mock_tool):
            # Mock tool_service.invoke_tool to raise an exception
            with patch.object(service.tool_service, 'invoke_tool', new_callable=AsyncMock) as mock_invoke:
                mock_invoke.side_effect = Exception("Backend connection failed")

                response = await service.execute_tool(
                    tool_name="failing_tool",
                    arguments={},
                    user_email="test@example.com",
                    token_teams=[],
                    is_admin=False,
                    scope=None,
                )

                # Backend failure becomes a structured error response, not a raise
                assert response.success is False
                assert response.error == "Backend connection failed"
                assert response.execution_time_ms is not None

    @pytest.mark.asyncio
    async def test_execute_tool_metadata_present(self, test_db):
        """Test execution metadata is present in response."""
        service = MetaToolService(test_db)

        # Create mock tool
        mock_tool = MagicMock()
        mock_tool.id = str(uuid.uuid4())
        mock_tool.name = "meta_tool"
        mock_tool.input_schema = {}

        # Create mock result shaped like an MCP tool-call result
        mock_result = MagicMock()
        mock_result.isError = False
        mock_content = MagicMock()
        mock_content.text = "success"
        mock_result.content = [mock_content]

        with patch.object(service, '_resolve_tool', new_callable=AsyncMock, return_value=mock_tool):
            with patch.object(service.tool_service, 'invoke_tool', new_callable=AsyncMock, return_value=mock_result) as mock_invoke:
                response = await service.execute_tool(
                    tool_name="meta_tool",
                    arguments={},
                    user_email="test@example.com",
                    token_teams=[],
                    is_admin=False,
                    scope=None,
                )

                # Verify metadata
                assert response.tool_name == "meta_tool"
                assert response.execution_time_ms is not None
                assert isinstance(response.execution_time_ms, (int, float))
                assert response.execution_time_ms >= 0

                # Verify invoke_tool was called with proper metadata
                mock_invoke.assert_called_once()
                call_kwargs = mock_invoke.call_args.kwargs
                assert "meta_data" in call_kwargs
                assert call_kwargs["meta_data"]["meta_tool"] == "execute_tool"
                assert "request_id" in call_kwargs["meta_data"]
diff --git a/tests/unit/mcpgateway/test_meta_server.py b/tests/unit/mcpgateway/test_meta_server.py
new file mode 100644
index 0000000000..7f531c497a
--- /dev/null
+++ b/tests/unit/mcpgateway/test_meta_server.py
@@ -0,0 +1,2257 @@
+# -*- coding: utf-8 -*-
+"""Location: ./tests/unit/mcpgateway/test_meta_server.py
+Copyright 2025
+SPDX-License-Identifier: Apache-2.0
+
+Unit tests for the Meta-Server feature.
+
+Tests cover:
+- Meta-server schema validation (ServerType, MetaToolScope, MetaConfig)
+- Meta-tool request/response schema contracts
+- Meta-server creation with server_type='meta'
+- Config validation (limits, ranges)
+- Meta-tools appearing when server_type == 'meta'
+- Underlying tools hidden when hide_underlying_tools is enabled
+- MetaServerService stub handlers
+- search_tools: hybrid semantic + keyword search, merge, ranking, scope, pagination
+- get_similar_tools: vector similarity with self-filtering and scope
+- _apply_scope_filtering: all 7 scope fields with AND semantics
+- Helper methods: _get_tool_metadata, _get_tools_matching_tags, _map_to_tool_summaries
+"""
+
+# Standard
+import asyncio
+from types import SimpleNamespace
+from unittest.mock import AsyncMock, MagicMock, patch
+
+# Third-Party
+import pytest
+from pydantic import ValidationError
+
+# First-Party
+from mcpgateway.meta_server.schemas import (
+ DescribeToolRequest,
+ DescribeToolResponse,
+ ExecuteToolRequest,
+ ExecuteToolResponse,
+ GetPromptRequest,
+ GetPromptResponse,
+ GetSimilarToolsRequest,
+ GetSimilarToolsResponse,
+ GetToolCategoriesRequest,
+ GetToolCategoriesResponse,
+ ListPromptsRequest,
+ ListPromptsResponse,
+ ListResourcesRequest,
+ ListResourcesResponse,
+ ListToolsRequest,
+ ListToolsResponse,
+ META_TOOL_DEFINITIONS,
+ MetaConfig,
+ MetaToolScope,
+ ReadResourceRequest,
+ ReadResourceResponse,
+ SearchToolsRequest,
+ SearchToolsResponse,
+ ServerType,
+ ToolSummary,
+)
+from mcpgateway.meta_server.service import MetaServerService, get_meta_server_service
+from mcpgateway.schemas import ToolSearchResult
+
+# ServerType Enum Tests
+
class TestServerType:
    """Tests for the ServerType enum (string-valued: 'standard' / 'meta')."""

    def test_standard_value(self):
        """Test standard server type value."""
        assert ServerType.STANDARD.value == "standard"

    def test_meta_value(self):
        """Test meta server type value."""
        assert ServerType.META.value == "meta"

    def test_from_string_meta(self):
        """Test creating ServerType from string 'meta' (round-trip from stored value)."""
        assert ServerType("meta") == ServerType.META

    def test_from_string_standard(self):
        """Test creating ServerType from string 'standard'."""
        assert ServerType("standard") == ServerType.STANDARD

    def test_invalid_type_raises(self):
        """Test that invalid server type raises ValueError."""
        with pytest.raises(ValueError):
            ServerType("invalid")
+
+# MetaToolScope Tests
+
class TestMetaToolScope:
    """Tests for the MetaToolScope configuration model.

    Covers defaults, each filter field, visibility validation, and
    camelCase alias serialization.
    """

    def test_default_scope(self):
        """Test that default scope has empty lists (i.e. no filtering)."""
        scope = MetaToolScope()
        assert scope.include_tags == []
        assert scope.exclude_tags == []
        assert scope.include_servers == []
        assert scope.exclude_servers == []
        assert scope.include_visibility == []
        assert scope.include_teams == []
        assert scope.name_patterns == []

    def test_scope_with_tags(self):
        """Test scope with tag filters."""
        scope = MetaToolScope(include_tags=["prod", "stable"], exclude_tags=["deprecated"])
        assert scope.include_tags == ["prod", "stable"]
        assert scope.exclude_tags == ["deprecated"]

    def test_scope_with_servers(self):
        """Test scope with server filters."""
        scope = MetaToolScope(include_servers=["s1", "s2"], exclude_servers=["s3"])
        assert scope.include_servers == ["s1", "s2"]
        assert scope.exclude_servers == ["s3"]

    def test_scope_with_visibility(self):
        """Test scope with valid visibility values."""
        scope = MetaToolScope(include_visibility=["public", "team"])
        assert scope.include_visibility == ["public", "team"]

    def test_scope_invalid_visibility_raises(self):
        """Test that invalid visibility value raises ValidationError."""
        with pytest.raises(ValidationError):
            MetaToolScope(include_visibility=["invalid_level"])

    def test_scope_serialization(self):
        """Test scope serializes correctly with camelCase aliases."""
        scope = MetaToolScope(include_tags=["test"], name_patterns=["db_*"])
        data = scope.model_dump(by_alias=True)
        assert "includeTags" in data
        assert "namePatterns" in data
        assert data["includeTags"] == ["test"]

    def test_scope_with_teams(self):
        """Test scope with team filters."""
        scope = MetaToolScope(include_teams=["team-1", "team-2"])
        assert scope.include_teams == ["team-1", "team-2"]

    def test_scope_name_patterns(self):
        """Test scope with glob-style name patterns."""
        scope = MetaToolScope(name_patterns=["db_*", "*_tool"])
        assert scope.name_patterns == ["db_*", "*_tool"]
+
+
+# MetaConfig Tests
+
class TestMetaConfig:
    """Tests for the MetaConfig configuration model.

    Covers documented defaults, range constraints on the search limits,
    the max >= default invariant, and camelCase alias serialization.
    """

    def test_default_config(self):
        """Test default config values."""
        config = MetaConfig()
        assert config.enable_semantic_search is False
        assert config.enable_categories is False
        assert config.enable_similar_tools is False
        assert config.default_search_limit == 50
        assert config.max_search_limit == 200
        assert config.include_metrics_in_search is False

    def test_custom_config(self):
        """Test custom config values."""
        config = MetaConfig(
            enable_semantic_search=True,
            enable_categories=True,
            enable_similar_tools=True,
            default_search_limit=25,
            max_search_limit=500,
            include_metrics_in_search=True,
        )
        assert config.enable_semantic_search is True
        assert config.default_search_limit == 25
        assert config.max_search_limit == 500

    def test_config_search_limit_range(self):
        """Test that default_search_limit respects range constraints."""
        with pytest.raises(ValidationError):
            MetaConfig(default_search_limit=0)  # Must be >= 1

    def test_config_max_search_limit_range(self):
        """Test that max_search_limit respects range constraints."""
        with pytest.raises(ValidationError):
            MetaConfig(max_search_limit=0)  # Must be >= 1

    def test_config_max_less_than_default_raises(self):
        """Test that max_search_limit < default_search_limit raises ValidationError."""
        with pytest.raises(ValidationError):
            MetaConfig(default_search_limit=100, max_search_limit=50)

    def test_config_serialization(self):
        """Test config serializes correctly with camelCase aliases."""
        config = MetaConfig(enable_semantic_search=True)
        data = config.model_dump(by_alias=True)
        assert "enableSemanticSearch" in data
        assert data["enableSemanticSearch"] is True

    def test_config_max_equals_default(self):
        """Test that max_search_limit == default_search_limit is valid (boundary case)."""
        config = MetaConfig(default_search_limit=100, max_search_limit=100)
        assert config.max_search_limit == 100
+
+
+# Meta-Tool Request/Response Schema Tests
+
+class TestSearchToolsSchemas:
+ """Tests for search_tools request/response schemas."""
+
+ def test_request_minimal(self):
+ """Test minimal search request."""
+ req = SearchToolsRequest(query="database")
+ assert req.query == "database"
+ assert req.limit == 50
+ assert req.offset == 0
+
+ def test_request_with_all_fields(self):
+ """Test search request with all fields."""
+ req = SearchToolsRequest(query="test", limit=10, offset=5, tags=["db"], include_metrics=True)
+ assert req.limit == 10
+ assert req.tags == ["db"]
+
+ def test_request_empty_query_raises(self):
+ """Test that empty query raises ValidationError."""
+ with pytest.raises(ValidationError):
+ SearchToolsRequest(query="")
+
+ def test_response_empty(self):
+ """Test empty search response."""
+ resp = SearchToolsResponse(tools=[], total_count=0, query="test", has_more=False)
+ assert resp.total_count == 0
+ assert resp.has_more is False
+
+
+class TestListToolsSchemas:
+ """Tests for list_tools request/response schemas."""
+
+ def test_request_defaults(self):
+ """Test list request defaults."""
+ req = ListToolsRequest()
+ assert req.limit == 50
+ assert req.offset == 0
+
+ def test_response_with_tools(self):
+ """Test list response with tool summaries."""
+ tool = ToolSummary(name="my_tool", description="A test tool", server_id="s1", server_name="Server 1")
+ resp = ListToolsResponse(tools=[tool], total_count=1, has_more=False)
+ assert len(resp.tools) == 1
+ assert resp.tools[0].name == "my_tool"
+
+
+class TestDescribeToolSchemas:
+ """Tests for describe_tool request/response schemas."""
+
+ def test_request(self):
+ """Test describe request."""
+ req = DescribeToolRequest(tool_name="query_db")
+ assert req.tool_name == "query_db"
+
+ def test_request_empty_name_raises(self):
+ """Test that empty tool_name raises ValidationError."""
+ with pytest.raises(ValidationError):
+ DescribeToolRequest(tool_name="")
+
+ def test_response(self):
+ """Test describe response."""
+ resp = DescribeToolResponse(name="query_db", description="Run SQL queries")
+ assert resp.name == "query_db"
+ assert resp.input_schema is None
+
+
+class TestExecuteToolSchemas:
+ """Tests for execute_tool request/response schemas."""
+
+ def test_request(self):
+ """Test execute request."""
+ req = ExecuteToolRequest(tool_name="query_db", arguments={"sql": "SELECT 1"})
+ assert req.tool_name == "query_db"
+ assert req.arguments["sql"] == "SELECT 1"
+
+ def test_response_success(self):
+ """Test successful execute response."""
+ resp = ExecuteToolResponse(tool_name="query_db", success=True, result={"rows": []})
+ assert resp.success is True
+ assert resp.error is None
+
+ def test_response_failure(self):
+ """Test failed execute response."""
+ resp = ExecuteToolResponse(tool_name="query_db", success=False, error="Connection failed")
+ assert resp.success is False
+ assert resp.error == "Connection failed"
+
+
+class TestGetToolCategoriesSchemas:
+ """Tests for get_tool_categories request/response schemas."""
+
+ def test_request_defaults(self):
+ """Test categories request defaults."""
+ req = GetToolCategoriesRequest()
+ assert req.include_counts is True
+
+ def test_response_empty(self):
+ """Test empty categories response."""
+ resp = GetToolCategoriesResponse(categories=[], total_categories=0)
+ assert resp.total_categories == 0
+
+
+class TestGetSimilarToolsSchemas:
+ """Tests for get_similar_tools request/response schemas."""
+
+ def test_request(self):
+ """Test similar tools request."""
+ req = GetSimilarToolsRequest(tool_name="query_db", limit=5)
+ assert req.tool_name == "query_db"
+ assert req.limit == 5
+
+ def test_response_empty(self):
+ """Test empty similar tools response."""
+ resp = GetSimilarToolsResponse(reference_tool="query_db", similar_tools=[], total_found=0)
+ assert resp.reference_tool == "query_db"
+ assert resp.total_found == 0
+
+
+# META_TOOL_DEFINITIONS Tests
+
+class TestMetaToolDefinitions:
+ """Tests for the META_TOOL_DEFINITIONS registry."""
+
+ def test_all_six_tools_defined(self):
+ """Test that all six meta-tools are defined."""
+ expected = {"search_tools", "list_tools", "describe_tool", "execute_tool", "get_tool_categories", "get_similar_tools"}
+ assert set(META_TOOL_DEFINITIONS.keys()) == expected
+
+ def test_each_has_description(self):
+ """Test that each meta-tool has a description."""
+ for name, defn in META_TOOL_DEFINITIONS.items():
+ assert "description" in defn, f"{name} missing description"
+ assert isinstance(defn["description"], str)
+
+ def test_each_has_input_schema(self):
+ """Test that each meta-tool has an input_schema."""
+ for name, defn in META_TOOL_DEFINITIONS.items():
+ assert "input_schema" in defn, f"{name} missing input_schema"
+ assert isinstance(defn["input_schema"], dict)
+
+
+# MetaServerService Tests
+
+class TestMetaServerService:
+    """Tests for the MetaServerService.
+
+    Async handlers are exercised synchronously via ``asyncio.run``; database
+    access is stubbed by patching ``get_db`` with small generator fakes.
+    """
+
+    def test_get_meta_tool_definitions(self):
+        """Test that meta-tool definitions are returned correctly."""
+        service = MetaServerService()
+        defs = service.get_meta_tool_definitions()
+        assert len(defs) == 6
+        names = {d["name"] for d in defs}
+        assert "search_tools" in names
+        assert "execute_tool" in names
+
+    def test_is_meta_server(self):
+        """Test is_meta_server check."""
+        service = MetaServerService()
+        assert service.is_meta_server("meta") is True
+        assert service.is_meta_server("standard") is False
+        assert service.is_meta_server(None) is False
+
+    def test_should_hide_underlying_tools(self):
+        """Test should_hide_underlying_tools logic."""
+        service = MetaServerService()
+        # Hiding applies only when the server is a meta server AND the flag is set.
+        assert service.should_hide_underlying_tools("meta", True) is True
+        assert service.should_hide_underlying_tools("meta", False) is False
+        assert service.should_hide_underlying_tools("standard", True) is False
+        assert service.should_hide_underlying_tools(None, True) is False
+
+    def test_is_meta_tool(self):
+        """Test is_meta_tool check."""
+        service = MetaServerService()
+        assert service.is_meta_tool("search_tools") is True
+        assert service.is_meta_tool("list_tools") is True
+        assert service.is_meta_tool("some_random_tool") is False
+
+    def test_stub_search_tools(self):
+        """Test search_tools returns empty results when both search sources return nothing."""
+        service = MetaServerService()
+        # Semantic-search backend stubbed to return no hits.
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=[])
+
+        # Keyword-search backend: fake DB session whose query chain yields no rows.
+        def mock_get_db():
+            db = MagicMock()
+            db.query.return_value.filter.return_value.limit.return_value.all.return_value = []
+            yield db
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "database"}))
+            assert result["query"] == "database"
+            assert result["tools"] == []
+            assert result["totalCount"] == 0
+
+    def test_list_tools_returns_empty_when_no_tools(self):
+        """Test list_tools returns empty results when no tools exist."""
+        service = MetaServerService()
+
+        def mock_get_db():
+            db = MagicMock()
+            yield db
+
+        # Mock ToolService.list_tools to return empty list
+        from mcpgateway.services.tool_service import ToolService
+
+        with (
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=([], None)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("list_tools", {}))
+
+        assert result["tools"] == []
+        assert result["totalCount"] == 0
+        assert result["hasMore"] is False
+
+    def test_stub_describe_tool(self):
+        """Test describe_tool stub returns placeholder response."""
+        service = MetaServerService()
+
+        def mock_get_db():
+            db = MagicMock()
+            yield db
+
+        mock_response = DescribeToolResponse(name="my_tool", description="Stub description for my_tool")
+
+        with (
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+            patch("mcpgateway.services.meta_tool_service.MetaToolService.describe_tool", new_callable=AsyncMock, return_value=mock_response),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("describe_tool", {"tool_name": "my_tool"}))
+            assert result["name"] == "my_tool"
+            assert "Stub description" in result["description"]
+
+    def test_stub_execute_tool(self):
+        """Test execute_tool stub returns not-implemented response."""
+        service = MetaServerService()
+
+        def mock_get_db():
+            db = MagicMock()
+            yield db
+
+        mock_response = ExecuteToolResponse(tool_name="my_tool", success=False, error="This action is not yet implemented")
+
+        with (
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+            patch("mcpgateway.services.meta_tool_service.MetaToolService.execute_tool", new_callable=AsyncMock, return_value=mock_response),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("execute_tool", {"tool_name": "my_tool"}))
+            # Response keys are camelCase aliases.
+            assert result["toolName"] == "my_tool"
+            assert result["success"] is False
+            assert "not yet implemented" in result["error"]
+
+    def test_stub_get_tool_categories(self):
+        """Test get_tool_categories stub returns placeholder response."""
+        service = MetaServerService()
+        result = asyncio.run(service.handle_meta_tool_call("get_tool_categories", {}))
+        assert result["categories"] == []
+        assert result["totalCategories"] == 0
+
+    def test_stub_get_similar_tools(self):
+        """Test get_similar_tools returns empty when tool not found in DB."""
+        service = MetaServerService()
+
+        # Reference-tool lookup resolves to None (tool absent from DB).
+        def mock_get_db():
+            db = MagicMock()
+            db.query.return_value.filter.return_value.first.return_value = None
+            yield db
+
+        with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+            result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "my_tool"}))
+            assert result["referenceTool"] == "my_tool"
+            assert result["similarTools"] == []
+
+    def test_unknown_meta_tool_raises(self):
+        """Test that unknown meta-tool name raises ValueError."""
+        service = MetaServerService()
+        with pytest.raises(ValueError, match="Unknown meta-tool"):
+            asyncio.run(service.handle_meta_tool_call("nonexistent_tool", {}))
+
+    def test_singleton_service(self):
+        """Test that get_meta_server_service returns a singleton."""
+        s1 = get_meta_server_service()
+        s2 = get_meta_server_service()
+        assert s1 is s2
+
+
+# Server Schema Integration Tests (ServerCreate with server_type)
+
+class TestServerCreateMetaType:
+ """Tests for ServerCreate schema with meta server type support."""
+
+ def test_default_server_type(self):
+ """Test that default server_type is 'standard'."""
+ from mcpgateway.schemas import ServerCreate
+
+ server = ServerCreate(name="Test Server")
+ assert server.server_type == "standard"
+
+ def test_meta_server_type(self):
+ """Test creating a server with type 'meta'."""
+ from mcpgateway.schemas import ServerCreate
+
+ server = ServerCreate(name="Meta Server", server_type="meta")
+ assert server.server_type == "meta"
+
+ def test_invalid_server_type_raises(self):
+ """Test that invalid server_type raises ValidationError."""
+ from mcpgateway.schemas import ServerCreate
+
+ with pytest.raises(ValidationError):
+ ServerCreate(name="Bad Server", server_type="invalid")
+
+ def test_hide_underlying_tools_default(self):
+ """Test that hide_underlying_tools defaults to True."""
+ from mcpgateway.schemas import ServerCreate
+
+ server = ServerCreate(name="Test Server")
+ assert server.hide_underlying_tools is True
+
+ def test_meta_config_field(self):
+ """Test that meta_config can be set."""
+ from mcpgateway.schemas import ServerCreate
+
+ config = {"enable_semantic_search": True, "default_search_limit": 25}
+ server = ServerCreate(name="Meta Server", server_type="meta", meta_config=config)
+ assert server.meta_config == config
+
+ def test_meta_scope_field(self):
+ """Test that meta_scope can be set."""
+ from mcpgateway.schemas import ServerCreate
+
+ scope = {"include_tags": ["production"], "exclude_servers": ["legacy"]}
+ server = ServerCreate(name="Meta Server", server_type="meta", meta_scope=scope)
+ assert server.meta_scope == scope
+
+
+class TestServerUpdateMetaType:
+ """Tests for ServerUpdate schema with meta server type support."""
+
+ def test_update_server_type(self):
+ """Test updating server_type."""
+ from mcpgateway.schemas import ServerUpdate
+
+ update = ServerUpdate(server_type="meta")
+ assert update.server_type == "meta"
+
+ def test_update_invalid_server_type_raises(self):
+ """Test that invalid server_type raises ValidationError on update."""
+ from mcpgateway.schemas import ServerUpdate
+
+ with pytest.raises(ValidationError):
+ ServerUpdate(server_type="bad_type")
+
+ def test_update_meta_config(self):
+ """Test updating meta_config."""
+ from mcpgateway.schemas import ServerUpdate
+
+ update = ServerUpdate(meta_config={"enable_categories": True})
+ assert update.meta_config == {"enable_categories": True}
+
+
+class TestServerReadMetaFields:
+ """Tests for ServerRead schema meta-server fields."""
+
+ def test_read_defaults(self):
+ """Test that ServerRead has correct meta field defaults."""
+ from datetime import datetime, timezone
+
+ from mcpgateway.schemas import ServerRead
+
+ now = datetime.now(timezone.utc)
+ read = ServerRead(
+ id="test-id",
+ name="Test Server",
+ description=None,
+ icon=None,
+ created_at=now,
+ updated_at=now,
+ enabled=True,
+ )
+ assert read.server_type == "standard"
+ assert read.hide_underlying_tools is True
+ assert read.meta_config is None
+ assert read.meta_scope is None
+
+ def test_read_meta_server(self):
+ """Test ServerRead with meta server fields populated."""
+ from datetime import datetime, timezone
+
+ from mcpgateway.schemas import ServerRead
+
+ now = datetime.now(timezone.utc)
+ read = ServerRead(
+ id="test-id",
+ name="Meta Server",
+ description="A meta server",
+ icon=None,
+ created_at=now,
+ updated_at=now,
+ enabled=True,
+ server_type="meta",
+ hide_underlying_tools=True,
+ meta_config={"enable_semantic_search": True},
+ meta_scope={"include_tags": ["production"]},
+ )
+ assert read.server_type == "meta"
+ assert read.hide_underlying_tools is True
+ assert read.meta_config["enable_semantic_search"] is True
+ assert read.meta_scope["include_tags"] == ["production"]
+
+
+# DB Model Integration Tests
+
+class TestServerDBModelMetaFields:
+ """Tests for Server DB model meta-server fields."""
+
+ def test_server_db_has_meta_fields(self, test_db):
+ """Test that Server DB model has meta-server columns."""
+ from mcpgateway.db import Server as DbServer
+
+ server = DbServer(
+ name="Meta Test Server",
+ server_type="meta",
+ hide_underlying_tools=True,
+ meta_config={"enable_categories": True},
+ meta_scope={"include_tags": ["test"]},
+ )
+ test_db.add(server)
+ test_db.commit()
+ test_db.refresh(server)
+
+ assert server.server_type == "meta"
+ assert server.hide_underlying_tools is True
+ assert server.meta_config == {"enable_categories": True}
+ assert server.meta_scope == {"include_tags": ["test"]}
+
+ def test_server_db_default_type_standard(self, test_db):
+ """Test that Server DB model defaults to server_type='standard'."""
+ from mcpgateway.db import Server as DbServer
+
+ server = DbServer(name="Standard Server")
+ test_db.add(server)
+ test_db.commit()
+ test_db.refresh(server)
+
+ assert server.server_type == "standard"
+ assert server.hide_underlying_tools is True # Default True
+ assert server.meta_config is None
+ assert server.meta_scope is None
+
+
+# ---------------------------------------------------------------------------
+# Helpers for mocking DB and services used by search/similar
+# ---------------------------------------------------------------------------
+
+def _make_tool_search_result(name, description="desc", server_id="s1", server_name="Server1", score=0.8):
+ """Shorthand factory for ToolSearchResult."""
+ return ToolSearchResult(
+ tool_name=name,
+ description=description,
+ server_id=server_id,
+ server_name=server_name,
+ similarity_score=score,
+ )
+
+
+def _make_mock_tool(name, description="desc", gateway_id="s1", tags=None, visibility="public", team_id=None, enabled=True, input_schema=None):
+ """Create a mock Tool ORM object."""
+ tool = MagicMock()
+ tool.name = name
+ tool._computed_name = name
+ tool.description = description
+ tool.gateway_id = gateway_id
+ tool.gateway = SimpleNamespace(name="Server1")
+ tool.tags = tags or []
+ tool.visibility = visibility
+ tool.team_id = team_id
+ tool.enabled = enabled
+ tool.input_schema = input_schema
+ tool.id = f"id-{name}"
+ return tool
+
+
+def _mock_get_db_with_tools(tools):
+ """Return a mock get_db generator that supports query().filter().* patterns.
+
+ The mock DB handles several query patterns used across the service:
+ - .filter(...).limit(...).all() → returns tools (keyword search)
+ - .filter(...).all() → returns tools (metadata / tag queries)
+ - .filter(...).first() → returns first tool or None (tool lookup)
+ """
+ def mock_get_db():
+ db = MagicMock()
+ query = db.query.return_value
+
+ # Chain .filter() calls (supports multiple chained filters)
+ filter_mock = MagicMock()
+ query.filter.return_value = filter_mock
+ filter_mock.filter.return_value = filter_mock # support chained .filter().filter()
+
+ # .limit().all() for keyword search
+ filter_mock.limit.return_value.all.return_value = tools
+ # .all() for metadata / tag queries
+ filter_mock.all.return_value = tools
+ # .first() for single-tool lookup
+ filter_mock.first.return_value = tools[0] if tools else None
+
+ yield db
+
+ return mock_get_db
+
+
+# ---------------------------------------------------------------------------
+# search_tools comprehensive tests
+# ---------------------------------------------------------------------------
+
+class TestSearchToolsImplementation:
+    """Comprehensive tests for the _search_tools implementation.
+
+    Each test patches both backends — the semantic search service and the
+    DB-backed keyword search (via ``get_db``) — then drives the async handler
+    with ``asyncio.run``.
+    """
+
+    def test_search_tools_semantic_results_returned(self):
+        """Test that semantic search results are included in response."""
+        service = MetaServerService()
+        semantic_results = [
+            _make_tool_search_result("tool_a", score=0.9),
+            _make_tool_search_result("tool_b", score=0.7),
+        ]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [_make_mock_tool("tool_a"), _make_mock_tool("tool_b")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "test"}))
+
+        assert result["query"] == "test"
+        assert result["totalCount"] == 2
+        assert len(result["tools"]) == 2
+
+    def test_search_tools_keyword_fallback_when_semantic_fails(self):
+        """Test keyword search works when semantic search raises an exception."""
+        service = MetaServerService()
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(side_effect=RuntimeError("Embedding service down"))
+
+        mock_tools = [_make_mock_tool("db_query", description="Query a database")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "db_query"}))
+
+        # Keyword fallback should still produce results
+        assert result["totalCount"] >= 1
+        tool_names = [t["name"] for t in result["tools"]]
+        assert "db_query" in tool_names
+
+    def test_search_tools_both_fail_returns_empty(self):
+        """Test that when both semantic and keyword search fail, empty results returned."""
+        service = MetaServerService()
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(side_effect=RuntimeError("fail"))
+
+        # Raises on first next(); the trailing yield makes it a generator function.
+        def broken_get_db():
+            raise RuntimeError("DB down")
+            yield  # noqa: unreachable - needed to make it a generator
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", broken_get_db),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "anything"}))
+
+        assert result["tools"] == []
+        assert result["totalCount"] == 0
+
+    def test_search_tools_merge_dedup_keeps_higher_score(self):
+        """Test that duplicates are merged keeping the higher score."""
+        service = MetaServerService()
+        semantic_results = [_make_tool_search_result("shared_tool", score=0.9)]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        # Keyword search also finds "shared_tool" with a lower score
+        mock_tools = [_make_mock_tool("shared_tool")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "shared_tool"}))
+
+        # Should have one result, not two
+        assert result["totalCount"] == 1
+        assert len(result["tools"]) == 1
+        assert result["tools"][0]["name"] == "shared_tool"
+
+    def test_search_tools_ranking_descending_by_score(self):
+        """Test that results are sorted descending by similarity score."""
+        service = MetaServerService()
+        semantic_results = [
+            _make_tool_search_result("low_score", score=0.3),
+            _make_tool_search_result("high_score", score=0.95),
+            _make_tool_search_result("mid_score", score=0.6),
+        ]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [
+            _make_mock_tool("low_score"),
+            _make_mock_tool("high_score"),
+            _make_mock_tool("mid_score"),
+        ]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "test"}))
+
+        names = [t["name"] for t in result["tools"]]
+        assert names == ["high_score", "mid_score", "low_score"]
+
+    def test_search_tools_pagination_offset_and_limit(self):
+        """Test pagination with offset and limit."""
+        service = MetaServerService()
+        # Create 5 results
+        semantic_results = [
+            _make_tool_search_result(f"tool_{i}", score=1.0 - i * 0.1)
+            for i in range(5)
+        ]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [_make_mock_tool(f"tool_{i}") for i in range(5)]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test", "limit": 2, "offset": 1,
+            }))
+
+        # totalCount reflects all matches; tools is the paginated slice.
+        assert result["totalCount"] == 5
+        assert len(result["tools"]) == 2
+        assert result["hasMore"] is True
+
+    def test_search_tools_pagination_no_more_results(self):
+        """Test has_more is False when all results fit."""
+        service = MetaServerService()
+        semantic_results = [_make_tool_search_result("tool_a", score=0.8)]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [_make_mock_tool("tool_a")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test", "limit": 50, "offset": 0,
+            }))
+
+        assert result["hasMore"] is False
+        assert result["totalCount"] == 1
+
+    def test_search_tools_pagination_offset_beyond_results(self):
+        """Test offset beyond total results returns empty tools list."""
+        service = MetaServerService()
+        semantic_results = [_make_tool_search_result("tool_a", score=0.8)]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [_make_mock_tool("tool_a")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test", "limit": 10, "offset": 100,
+            }))
+
+        assert result["tools"] == []
+        assert result["totalCount"] == 1
+        assert result["hasMore"] is False
+
+    def test_search_tools_tag_filter(self):
+        """Test tag filtering narrows results to tools with matching tags."""
+        service = MetaServerService()
+        semantic_results = [
+            _make_tool_search_result("tagged_tool", score=0.9),
+            _make_tool_search_result("untagged_tool", score=0.8),
+        ]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [
+            _make_mock_tool("tagged_tool", tags=["database"]),
+            _make_mock_tool("untagged_tool", tags=[]),
+        ]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test", "tags": ["database"],
+            }))
+
+        tool_names = [t["name"] for t in result["tools"]]
+        assert "tagged_tool" in tool_names
+        assert "untagged_tool" not in tool_names
+
+    def test_search_tools_scope_filtering_applied(self):
+        """Test that scope filtering is applied to search results."""
+        service = MetaServerService()
+        semantic_results = [
+            _make_tool_search_result("public_tool", server_id="s1", score=0.9),
+            _make_tool_search_result("private_tool", server_id="s2", score=0.8),
+        ]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [
+            _make_mock_tool("public_tool", visibility="public"),
+            _make_mock_tool("private_tool", visibility="private"),
+        ]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test",
+                "scope": {"include_visibility": ["public"]},
+            }))
+
+        tool_names = [t["name"] for t in result["tools"]]
+        assert "public_tool" in tool_names
+        assert "private_tool" not in tool_names
+
+    def test_search_tools_keyword_exact_match_scores_highest(self):
+        """Test keyword search gives 1.0 score for exact name match."""
+        service = MetaServerService()
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=[])
+
+        exact_tool = _make_mock_tool("db_query")
+        partial_tool = _make_mock_tool("db_query_extended")
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([exact_tool, partial_tool])),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "db_query"}))
+
+        # Exact match should be first (score 1.0 > 0.7)
+        if len(result["tools"]) >= 2:
+            assert result["tools"][0]["name"] == "db_query"
+
+    def test_search_tools_include_metrics_parameter_passed(self):
+        """Test that include_metrics is forwarded correctly."""
+        service = MetaServerService()
+        semantic_results = [_make_tool_search_result("tool_a", score=0.9)]
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=semantic_results)
+
+        mock_tools = [_make_mock_tool("tool_a")]
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(mock_tools)),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {
+                "query": "test", "include_metrics": True,
+            }))
+
+        # Metrics are currently None (TODO), but the call should succeed
+        assert len(result["tools"]) == 1
+
+    def test_search_tools_response_is_camel_case(self):
+        """Test response uses camelCase aliases for serialization."""
+        service = MetaServerService()
+        mock_semantic = AsyncMock()
+        mock_semantic.search_tools = AsyncMock(return_value=[])
+
+        with (
+            patch("mcpgateway.meta_server.service.get_semantic_search_service", return_value=mock_semantic),
+            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([])),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("search_tools", {"query": "x"}))
+
+        assert "totalCount" in result
+        assert "hasMore" in result
+        assert "query" in result
+        assert "tools" in result
+
+
+# ---------------------------------------------------------------------------
+# get_similar_tools comprehensive tests
+# ---------------------------------------------------------------------------
+
+class TestGetSimilarToolsImplementation:
+ """Comprehensive tests for the _get_similar_tools implementation."""
+
+ def test_similar_tools_empty_tool_name_returns_empty(self):
+ """Test that empty tool_name returns empty results immediately."""
+ service = MetaServerService()
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": ""}))
+ assert result["referenceTool"] == ""
+ assert result["similarTools"] == []
+ assert result["totalFound"] == 0
+
+ def test_similar_tools_tool_not_found(self):
+ """Test that a non-existent reference tool returns empty results."""
+ service = MetaServerService()
+
+ with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([])):
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "nonexistent"}))
+
+ assert result["referenceTool"] == "nonexistent"
+ assert result["similarTools"] == []
+ assert result["totalFound"] == 0
+
+    def test_similar_tools_no_embedding_returns_empty(self):
+        """Test that tool without embedding returns empty results.
+
+        ``get_db`` is called more than once by the handler; the fake keys its
+        behavior off a call counter so only the first session resolves the
+        reference tool.
+        """
+        service = MetaServerService()
+        ref_tool = _make_mock_tool("my_tool")
+
+        # Mutable cell so the nested generator can track how many sessions were opened.
+        call_count = [0]
+
+        def mock_get_db():
+            call_count[0] += 1
+            db = MagicMock()
+            query = db.query.return_value
+            filter_mock = MagicMock()
+            query.filter.return_value = filter_mock
+            filter_mock.filter.return_value = filter_mock
+
+            if call_count[0] == 1:
+                # First call: resolve reference tool
+                filter_mock.first.return_value = ref_tool
+            else:
+                # Second call: get embedding — return None
+                pass
+            yield db
+
+        mock_vector_service = MagicMock()
+        mock_vector_service.get_tool_embedding.return_value = None
+
+        with (
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+            patch("mcpgateway.meta_server.service.VectorSearchService", return_value=mock_vector_service),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "my_tool"}))
+
+        assert result["similarTools"] == []
+        assert result["totalFound"] == 0
+
+    def test_similar_tools_filters_out_reference_tool(self):
+        """Test that the reference tool itself is excluded from similar results.
+
+        The vector search deliberately returns the reference tool at score 1.0
+        to verify the handler drops it before building the response.
+        """
+        service = MetaServerService()
+        ref_tool = _make_mock_tool("my_tool")
+
+        similar_results = [
+            _make_tool_search_result("my_tool", score=1.0),  # self — should be filtered
+            _make_tool_search_result("similar_tool_a", score=0.9),
+            _make_tool_search_result("similar_tool_b", score=0.8),
+        ]
+
+        # Counter kept for parity with the sibling tests; every session here
+        # answers both the reference lookup (.first) and the batch fetch (.all).
+        call_count = [0]
+
+        def mock_get_db():
+            call_count[0] += 1
+            db = MagicMock()
+            query = db.query.return_value
+            filter_mock = MagicMock()
+            query.filter.return_value = filter_mock
+            filter_mock.filter.return_value = filter_mock
+            filter_mock.first.return_value = ref_tool
+            filter_mock.all.return_value = [
+                _make_mock_tool("similar_tool_a"),
+                _make_mock_tool("similar_tool_b"),
+            ]
+            yield db
+
+        mock_embedding = MagicMock()
+        mock_embedding.embedding = [0.1] * 128
+
+        mock_vector_service = MagicMock()
+        mock_vector_service.get_tool_embedding.return_value = mock_embedding
+        mock_vector_service.search_similar_tools = AsyncMock(return_value=similar_results)
+
+        with (
+            patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+            patch("mcpgateway.meta_server.service.VectorSearchService", return_value=mock_vector_service),
+        ):
+            result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "my_tool"}))
+
+        tool_names = [t["name"] for t in result["similarTools"]]
+        assert "my_tool" not in tool_names
+        assert "similar_tool_a" in tool_names
+        assert "similar_tool_b" in tool_names
+
+ def test_similar_tools_respects_limit(self):
+ """Test that limit parameter is respected."""
+ service = MetaServerService()
+ ref_tool = _make_mock_tool("my_tool")
+
+ similar_results = [
+ _make_tool_search_result(f"similar_{i}", score=0.9 - i * 0.1)
+ for i in range(5)
+ ]
+
+ call_count = [0]
+
+ def mock_get_db():
+ call_count[0] += 1
+ db = MagicMock()
+ query = db.query.return_value
+ filter_mock = MagicMock()
+ query.filter.return_value = filter_mock
+ filter_mock.filter.return_value = filter_mock
+ filter_mock.first.return_value = ref_tool
+ filter_mock.all.return_value = [_make_mock_tool(f"similar_{i}") for i in range(2)]
+ yield db
+
+ mock_embedding = MagicMock()
+ mock_embedding.embedding = [0.1] * 128
+
+ mock_vector_service = MagicMock()
+ mock_vector_service.get_tool_embedding.return_value = mock_embedding
+ mock_vector_service.search_similar_tools = AsyncMock(return_value=similar_results)
+
+ with (
+ patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+ patch("mcpgateway.meta_server.service.VectorSearchService", return_value=mock_vector_service),
+ ):
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {
+ "tool_name": "my_tool", "limit": 2,
+ }))
+
+ # Limit should cap the results (after self-filtering)
+ assert len(result["similarTools"]) <= 2
+
+ def test_similar_tools_scope_filtering_applied(self):
+ """Test that scope filtering is applied to similar tools results."""
+ service = MetaServerService()
+ ref_tool = _make_mock_tool("my_tool")
+
+ similar_results = [
+ _make_tool_search_result("public_similar", server_id="s1", score=0.9),
+ _make_tool_search_result("private_similar", server_id="s2", score=0.8),
+ ]
+
+ call_count = [0]
+
+ def mock_get_db():
+ call_count[0] += 1
+ db = MagicMock()
+ query = db.query.return_value
+ filter_mock = MagicMock()
+ query.filter.return_value = filter_mock
+ filter_mock.filter.return_value = filter_mock
+ filter_mock.first.return_value = ref_tool
+ filter_mock.all.return_value = [
+ _make_mock_tool("public_similar", visibility="public"),
+ _make_mock_tool("private_similar", visibility="private"),
+ ]
+ yield db
+
+ mock_embedding = MagicMock()
+ mock_embedding.embedding = [0.1] * 128
+
+ mock_vector_service = MagicMock()
+ mock_vector_service.get_tool_embedding.return_value = mock_embedding
+ mock_vector_service.search_similar_tools = AsyncMock(return_value=similar_results)
+
+ with (
+ patch("mcpgateway.meta_server.service.get_db", mock_get_db),
+ patch("mcpgateway.meta_server.service.VectorSearchService", return_value=mock_vector_service),
+ ):
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {
+ "tool_name": "my_tool",
+ "scope": {"include_visibility": ["public"]},
+ }))
+
+ tool_names = [t["name"] for t in result["similarTools"]]
+ assert "public_similar" in tool_names
+ assert "private_similar" not in tool_names
+
+ def test_similar_tools_db_error_returns_empty(self):
+ """Test that DB error during tool lookup returns empty results gracefully."""
+ service = MetaServerService()
+
+ def broken_get_db():
+ raise RuntimeError("DB connection failed")
+ yield # noqa: unreachable
+
+ with patch("mcpgateway.meta_server.service.get_db", broken_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "my_tool"}))
+
+ assert result["similarTools"] == []
+ assert result["totalFound"] == 0
+
+ def test_similar_tools_response_is_camel_case(self):
+ """Test response uses camelCase aliases."""
+ service = MetaServerService()
+
+ with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([])):
+ result = asyncio.run(service.handle_meta_tool_call("get_similar_tools", {"tool_name": "x"}))
+
+ assert "referenceTool" in result
+ assert "similarTools" in result
+ assert "totalFound" in result
+
+
+# ---------------------------------------------------------------------------
+# _apply_scope_filtering tests (all 7 scope fields + AND semantics)
+# ---------------------------------------------------------------------------
+
class TestApplyScopeFiltering:
    """Tests for _apply_scope_filtering covering every MetaToolScope field."""

    def setup_method(self):
        """Create a service plus a standard trio of search results and DB rows."""
        self.service = MetaServerService()
        self.results = [
            _make_tool_search_result("tool_a", server_id="server_1", score=0.9),
            _make_tool_search_result("tool_b", server_id="server_2", score=0.8),
            _make_tool_search_result("tool_c", server_id="server_1", score=0.7),
        ]
        self.mock_tools = [
            _make_mock_tool("tool_a", tags=["database", "production"], visibility="public", team_id="team1"),
            _make_mock_tool("tool_b", tags=["deprecated"], visibility="private", team_id="team2"),
            _make_mock_tool("tool_c", tags=["database"], visibility="team", team_id="team1"),
        ]

    def _kept_names(self, scope):
        """Apply *scope* with the standard mock DB; return surviving tool names."""
        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(self.mock_tools)):
            return [r.tool_name for r in self.service._apply_scope_filtering(self.results, scope)]

    def test_no_scope_passes_all(self):
        """A None scope passes every result through."""
        assert len(self.service._apply_scope_filtering(self.results, None)) == 3

    def test_empty_scope_passes_all(self):
        """An empty scope dict passes every result through."""
        assert len(self.service._apply_scope_filtering(self.results, {})) == 3

    def test_include_tags_filter(self):
        """include_tags: a tool survives only with at least one matching tag."""
        kept = self._kept_names({"include_tags": ["production"]})
        assert "tool_a" in kept        # tagged "production"
        assert "tool_b" not in kept    # tagged "deprecated" only
        assert "tool_c" not in kept    # tagged "database" only

    def test_exclude_tags_filter(self):
        """exclude_tags: any excluded tag disqualifies the tool."""
        kept = self._kept_names({"exclude_tags": ["deprecated"]})
        assert "tool_a" in kept
        assert "tool_b" not in kept    # carries "deprecated"
        assert "tool_c" in kept

    def test_include_servers_filter(self):
        """include_servers: tool must originate from a listed server."""
        kept = self._kept_names({"include_servers": ["server_1"]})
        assert "tool_a" in kept        # server_1
        assert "tool_b" not in kept    # server_2
        assert "tool_c" in kept        # server_1

    def test_exclude_servers_filter(self):
        """exclude_servers: tool must not come from an excluded server."""
        kept = self._kept_names({"exclude_servers": ["server_2"]})
        assert "tool_a" in kept
        assert "tool_b" not in kept    # server_2
        assert "tool_c" in kept

    def test_include_visibility_filter(self):
        """include_visibility: tool visibility must be one of the listed levels."""
        kept = self._kept_names({"include_visibility": ["public"]})
        assert "tool_a" in kept        # public
        assert "tool_b" not in kept    # private
        assert "tool_c" not in kept    # team

    def test_include_teams_filter(self):
        """include_teams: tool must belong to one of the listed teams."""
        kept = self._kept_names({"include_teams": ["team1"]})
        assert "tool_a" in kept        # team1
        assert "tool_b" not in kept    # team2
        assert "tool_c" in kept        # team1

    def test_name_patterns_filter(self):
        """name_patterns: tool name must match at least one glob pattern."""
        assert self._kept_names({"name_patterns": ["tool_a"]}) == ["tool_a"]

    def test_name_patterns_wildcard(self):
        """Glob wildcards in name_patterns match every tool_ name here."""
        assert len(self._kept_names({"name_patterns": ["tool_*"]})) == 3

    def test_combined_and_semantics(self):
        """Multiple scope fields combine with AND semantics."""
        kept = self._kept_names({
            "include_tags": ["database"],
            "include_visibility": ["public"],
            "include_teams": ["team1"],
        })
        # Only tool_a satisfies database tag AND public visibility AND team1.
        assert kept == ["tool_a"]

    def test_scope_excludes_tool_not_in_db(self):
        """Tools missing from the DB are excluded from scoped results."""
        # DB only knows tool_a — tool_b and tool_c must be dropped.
        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([self.mock_tools[0]])):
            survivors = self.service._apply_scope_filtering(self.results, {"include_tags": ["database"]})
        kept = [r.tool_name for r in survivors]
        assert "tool_a" in kept
        assert "tool_b" not in kept
        assert "tool_c" not in kept

    def test_scope_empty_results_input(self):
        """An empty results list filters to an empty list."""
        assert self.service._apply_scope_filtering([], {"include_tags": ["database"]}) == []

    def test_scope_all_fields_combined_strict(self):
        """Strict AND across all seven fields filters aggressively."""
        kept = self._kept_names({
            "include_tags": ["database"],
            "exclude_tags": ["deprecated"],
            "include_servers": ["server_1"],
            "exclude_servers": ["server_3"],  # matches nothing
            "include_visibility": ["public", "team"],
            "include_teams": ["team1"],
            "name_patterns": ["tool_*"],
        })
        # tool_a and tool_c pass every clause; tool_b fails on exclude_tags.
        assert "tool_a" in kept
        assert "tool_c" in kept
        assert "tool_b" not in kept
+
+
+# ---------------------------------------------------------------------------
+# _get_tool_metadata tests
+# ---------------------------------------------------------------------------
+
class TestGetToolMetadata:
    """Tests for the _get_tool_metadata helper."""

    def test_returns_metadata_for_found_tools(self):
        """Metadata is returned for tools present in the DB."""
        svc = MetaServerService()
        db_tools = [
            _make_mock_tool("tool_a", tags=["db"], visibility="public", team_id="t1", input_schema={"type": "object"}),
        ]

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(db_tools)):
            meta = svc._get_tool_metadata(["tool_a"])

        assert "tool_a" in meta
        entry = meta["tool_a"]
        assert entry["tags"] == ["db"]
        assert entry["visibility"] == "public"
        assert entry["team_id"] == "t1"
        assert entry["input_schema"] == {"type": "object"}

    def test_empty_input_returns_empty(self):
        """An empty tool-name list yields an empty dict."""
        assert MetaServerService()._get_tool_metadata([]) == {}

    def test_db_error_returns_empty(self):
        """A DB failure degrades gracefully to an empty dict."""
        svc = MetaServerService()

        def failing_get_db():
            # Raises on first next(); yield keeps this a generator function.
            raise RuntimeError("DB down")
            yield  # pragma: no cover

        with patch("mcpgateway.meta_server.service.get_db", failing_get_db):
            assert svc._get_tool_metadata(["tool_a"]) == {}

    def test_missing_tool_not_in_result(self):
        """Names absent from the DB simply do not appear in the result dict."""
        svc = MetaServerService()

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([_make_mock_tool("tool_a")])):
            meta = svc._get_tool_metadata(["tool_a", "tool_missing"])

        assert "tool_a" in meta
        assert "tool_missing" not in meta

    def test_null_tags_default_to_empty_list(self):
        """Tools stored with tags=None come back with an empty tag list."""
        svc = MetaServerService()

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([_make_mock_tool("tool_a", tags=None)])):
            meta = svc._get_tool_metadata(["tool_a"])

        assert meta["tool_a"]["tags"] == []
+
+
+# ---------------------------------------------------------------------------
+# _get_tools_matching_tags tests
+# ---------------------------------------------------------------------------
+
class TestGetToolsMatchingTags:
    """Tests for the _get_tools_matching_tags helper."""

    def test_returns_matching_tool_names(self):
        """Tools carrying at least one requested tag are returned."""
        svc = MetaServerService()
        db_tools = [
            _make_mock_tool("tool_a", tags=["database", "prod"]),
            _make_mock_tool("tool_b", tags=["messaging"]),
            _make_mock_tool("tool_c", tags=["database"]),
        ]

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(db_tools)):
            matched = svc._get_tools_matching_tags(["database"])

        assert "tool_a" in matched
        assert "tool_c" in matched
        assert "tool_b" not in matched

    def test_no_matching_tags_returns_empty(self):
        """No tag overlap yields an empty collection."""
        svc = MetaServerService()

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([_make_mock_tool("tool_a", tags=["other"])])):
            matched = svc._get_tools_matching_tags(["nonexistent"])

        assert len(matched) == 0

    def test_db_error_returns_empty_set(self):
        """A DB failure degrades gracefully to an empty set."""
        svc = MetaServerService()

        def failing_get_db():
            # Raises on first next(); yield keeps this a generator function.
            raise RuntimeError("DB down")
            yield  # pragma: no cover

        with patch("mcpgateway.meta_server.service.get_db", failing_get_db):
            assert svc._get_tools_matching_tags(["database"]) == set()
+
+
+# ---------------------------------------------------------------------------
+# _map_to_tool_summaries tests
+# ---------------------------------------------------------------------------
+
class TestMapToToolSummaries:
    """Tests for the _map_to_tool_summaries helper."""

    def test_maps_results_to_summaries(self):
        """ToolSearchResult objects map onto ToolSummary objects with DB metadata."""
        svc = MetaServerService()
        hits = [
            _make_tool_search_result("tool_a", description="Tool A desc", server_id="s1", server_name="Server1"),
        ]
        db_tools = [
            _make_mock_tool("tool_a", tags=["db"], input_schema={"type": "object"}),
        ]

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(db_tools)):
            summaries = svc._map_to_tool_summaries(hits)

        assert len(summaries) == 1
        summary = summaries[0]
        assert summary.name == "tool_a"
        assert summary.description == "Tool A desc"
        assert summary.server_id == "s1"
        assert summary.server_name == "Server1"
        assert summary.tags == ["db"]
        assert summary.input_schema == {"type": "object"}

    def test_empty_results_returns_empty(self):
        """An empty results list maps to an empty summaries list."""
        assert MetaServerService()._map_to_tool_summaries([]) == []

    def test_tool_not_in_db_gets_default_metadata(self):
        """Results with no DB row fall back to empty default metadata."""
        svc = MetaServerService()

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([])):
            summaries = svc._map_to_tool_summaries([_make_tool_search_result("missing_tool")])

        assert len(summaries) == 1
        assert summaries[0].name == "missing_tool"
        assert summaries[0].tags == []
        assert summaries[0].input_schema is None

    def test_multiple_results_mapped_in_order(self):
        """Input ordering is preserved in the mapped summaries."""
        svc = MetaServerService()
        names = ["tool_a", "tool_b", "tool_c"]
        hits = [
            _make_tool_search_result(name, score=score)
            for name, score in zip(names, (0.9, 0.8, 0.7))
        ]

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([_make_mock_tool(n) for n in names])):
            summaries = svc._map_to_tool_summaries(hits)

        assert [s.name for s in summaries] == names

    def test_metrics_is_none_by_default(self):
        """metrics stays None even with include_metrics=True (ToolMetric pending)."""
        svc = MetaServerService()

        with patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools([_make_mock_tool("tool_a")])):
            summaries = svc._map_to_tool_summaries([_make_tool_search_result("tool_a")], include_metrics=True)

        assert summaries[0].metrics is None
+
+# ---------------------------------------------------------------------------
+# list_tools comprehensive tests
+# ---------------------------------------------------------------------------
+
+
class TestListToolsImplementation:
    """Comprehensive tests for the _list_tools implementation.

    The original copy-pasted mock construction per test and left several
    unused locals (``mock_db_tools``, loop index ``i``, unasserted ``result``
    bindings); the builders below remove that duplication.
    """

    @staticmethod
    def _tool_read(name, description, server_id="s1", server_name="Server 1", tags=None, input_schema=None):
        """Build a mock ToolRead-like object as returned by ToolService.list_tools."""
        tool = MagicMock()
        tool.name = name
        tool.description = description
        tool.gateway = SimpleNamespace(id=server_id, name=server_name)
        tool.tags = [] if tags is None else tags
        tool.input_schema = {} if input_schema is None else input_schema
        return tool

    @staticmethod
    def _meta_entry(tags=None, input_schema=None, visibility="public", team_id=None):
        """Build one entry in the shape returned by _get_tool_metadata."""
        return {
            "tags": [] if tags is None else tags,
            "input_schema": {} if input_schema is None else input_schema,
            "visibility": visibility,
            "team_id": team_id,
        }

    @staticmethod
    def _mock_get_db():
        """Minimal get_db replacement yielding a throwaway session mock."""
        yield MagicMock()

    def test_list_tools_returns_results(self):
        """list_tools surfaces every tool returned by ToolService."""
        service = MetaServerService()
        tools = [
            self._tool_read("tool_a", "Tool A description", "server_1", tags=["database"], input_schema={"type": "object"}),
            self._tool_read("tool_b", "Tool B description", "server_1", tags=["api"], input_schema={"type": "object"}),
        ]
        metadata = {
            "tool_a": self._meta_entry(tags=["database"], input_schema={"type": "object"}),
            "tool_b": self._meta_entry(tags=["api"], input_schema={"type": "object"}),
        }

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {}))

        assert result["totalCount"] == 2
        assert len(result["tools"]) == 2
        tool_names = [t["name"] for t in result["tools"]]
        assert "tool_a" in tool_names
        assert "tool_b" in tool_names

    def test_list_tools_with_pagination(self):
        """list_tools respects limit and offset and reports hasMore."""
        service = MetaServerService()
        tools = [self._tool_read(f"tool_{i}", f"Tool {i}", "server_1") for i in range(5)]
        metadata = {f"tool_{i}": self._meta_entry() for i in range(5)}

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {"limit": 2, "offset": 1}))

        assert result["totalCount"] == 5
        assert len(result["tools"]) == 2
        assert result["hasMore"] is True

    def test_list_tools_with_tag_filter(self):
        """list_tools accepts a tags filter and still returns a tools payload."""
        service = MetaServerService()
        tools = [
            self._tool_read("db_tool", "Database tool", tags=["database"]),
            self._tool_read("api_tool", "API tool", tags=["api"]),
        ]
        metadata = {
            "db_tool": self._meta_entry(tags=["database"]),
            "api_tool": self._meta_entry(tags=["api"]),
        }

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {"tags": ["database"]}))

        # The tags filter is forwarded to ToolService; response shape holds.
        assert "tools" in result

    def test_list_tools_with_server_filter(self):
        """list_tools accepts a server_id filter."""
        service = MetaServerService()
        tools = [self._tool_read("tool_a", "Tool A", "server_1")]
        metadata = {"tool_a": self._meta_entry()}

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {"server_id": "server_1"}))

        assert len(result["tools"]) == 1

    def test_list_tools_with_sorting(self):
        """sort_by / sort_order are forwarded to ToolService.list_tools."""
        service = MetaServerService()
        names = ["alpha", "beta", "gamma"]
        tools = [self._tool_read(name, f"Tool {name}") for name in names]
        metadata = {name: self._meta_entry() for name in names}

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)) as mock_list,
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            asyncio.run(service.handle_meta_tool_call("list_tools", {
                "sort_by": "name",
                "sort_order": "asc",
            }))

        # Verify ToolService.list_tools was called with the sort params.
        mock_list.assert_called_once()
        call_kwargs = mock_list.call_args.kwargs
        assert call_kwargs["sort_by"] == "name"
        assert call_kwargs["sort_order"] == "asc"

    def test_list_tools_scope_filtering_applied(self):
        """Scope filtering (visibility here) is applied on top of list results."""
        service = MetaServerService()
        tools = [
            self._tool_read("public_tool", "Public tool"),
            self._tool_read("private_tool", "Private tool"),
        ]
        # Metadata path uses the real DB lookup here, so visibility is taken
        # from these mock DB rows rather than a patched _get_tool_metadata.
        db_tools = [
            _make_mock_tool("public_tool", visibility="public"),
            _make_mock_tool("private_tool", visibility="private"),
        ]

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", _mock_get_db_with_tools(db_tools)),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {
                "scope": {"include_visibility": ["public"]},
            }))

        tool_names = [t["name"] for t in result["tools"]]
        assert "public_tool" in tool_names
        assert "private_tool" not in tool_names

    def test_list_tools_db_error_returns_empty(self):
        """A broken DB session factory yields an empty, well-formed response."""
        service = MetaServerService()

        def broken_get_db():
            # Raises on first next(); yield keeps this a generator function.
            raise RuntimeError("DB connection failed")
            yield  # pragma: no cover

        with patch("mcpgateway.meta_server.service.get_db", broken_get_db):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {}))

        assert result["tools"] == []
        assert result["totalCount"] == 0
        assert result["hasMore"] is False

    def test_list_tools_offset_beyond_total_returns_empty(self):
        """An offset past the end returns no tools but the true totalCount."""
        service = MetaServerService()
        tools = [self._tool_read("tool_a", "Tool A")]
        metadata = {"tool_a": self._meta_entry()}

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)),
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {"offset": 100}))

        assert result["tools"] == []
        assert result["totalCount"] == 1
        assert result["hasMore"] is False

    def test_list_tools_include_schema_parameter(self):
        """include_schema=True is passed through to ToolService.list_tools."""
        service = MetaServerService()
        tools = [self._tool_read("tool_a", "Tool A", input_schema={"type": "object", "properties": {"arg": {"type": "string"}}})]
        metadata = {"tool_a": self._meta_entry(input_schema={"type": "object"})}

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=(tools, None)) as mock_list,
            patch.object(service, "_get_tool_metadata", return_value=metadata),
        ):
            asyncio.run(service.handle_meta_tool_call("list_tools", {"include_schema": True}))

        mock_list.assert_called_once()
        assert mock_list.call_args.kwargs["include_schema"] is True

    def test_list_tools_response_is_camel_case(self):
        """Response keys use camelCase serialization aliases."""
        service = MetaServerService()

        from mcpgateway.services.tool_service import ToolService

        with (
            patch("mcpgateway.meta_server.service.get_db", self._mock_get_db),
            patch.object(ToolService, "list_tools", new_callable=AsyncMock, return_value=([], None)),
        ):
            result = asyncio.run(service.handle_meta_tool_call("list_tools", {}))

        assert "totalCount" in result
        assert "hasMore" in result
+
+
+# ------------------------------------------------------------------
+# Tests for list_resources / read_resource / list_prompts / get_prompt
+# ------------------------------------------------------------------
+
+
+def _make_mock_resource(uri="resource://test", name="test-resource", description="A test resource",
+ mime_type="text/markdown", text_content="# Hello", tags=None, enabled=True, size=7):
+ """Create a mock Resource object."""
+ r = MagicMock()
+ r.uri = uri
+ r.name = name
+ r.description = description
+ r.mime_type = mime_type
+ r.text_content = text_content
+ r.binary_content = None
+ r.tags = tags or []
+ r.enabled = enabled
+ r.size = size
+ r.created_at = None
+ return r
+
+
+def _make_mock_prompt(name="test-prompt", description="A test prompt", template="Hello {name}",
+ argument_schema=None, tags=None, enabled=True):
+ """Create a mock Prompt object."""
+ p = MagicMock()
+ p.name = name
+ p.description = description
+ p.template = template
+ p.argument_schema = argument_schema or {"type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"]}
+ p.tags = tags or []
+ p.enabled = enabled
+ p.created_at = None
+ return p
+
+
+class TestListResourcesMetaTool:
+ """Tests for list_resources meta-tool."""
+
+ def test_list_resources_returns_results(self):
+ """Test that list_resources returns resources from DB."""
+ service = MetaServerService()
+ mock_resources = [
+ _make_mock_resource(uri="resource://a", name="res-a", tags=["guide"]),
+ _make_mock_resource(uri="resource://b", name="res-b", tags=["docs"]),
+ ]
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = mock_resources
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_resources", {}))
+
+ assert result["totalCount"] == 2
+ assert len(result["resources"]) == 2
+ assert result["resources"][0]["uri"] == "resource://a"
+ assert result["resources"][1]["uri"] == "resource://b"
+
+ def test_list_resources_with_tag_filter(self):
+ """Test that tag filtering works."""
+ service = MetaServerService()
+ mock_resources = [
+ _make_mock_resource(uri="resource://a", name="res-a", tags=["guide"]),
+ _make_mock_resource(uri="resource://b", name="res-b", tags=["docs"]),
+ ]
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = mock_resources
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_resources", {"tags": ["guide"]}))
+
+ assert result["totalCount"] == 1
+ assert result["resources"][0]["name"] == "res-a"
+
+ def test_list_resources_empty(self):
+ """Test list_resources with no results."""
+ service = MetaServerService()
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = []
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_resources", {}))
+
+ assert result["totalCount"] == 0
+ assert result["resources"] == []
+ assert result["hasMore"] is False
+
+ def test_list_resources_pagination(self):
+ """Test list_resources with offset and limit."""
+ service = MetaServerService()
+ mock_resources = [
+ _make_mock_resource(uri=f"resource://{i}", name=f"res-{i}")
+ for i in range(5)
+ ]
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = mock_resources
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_resources", {"limit": 2, "offset": 0}))
+
+ assert result["totalCount"] == 5
+ assert len(result["resources"]) == 2
+ assert result["hasMore"] is True
+
+
+class TestReadResourceMetaTool:
+ """Tests for read_resource meta-tool."""
+
+ def test_read_resource_returns_content(self):
+ """Test that read_resource returns text content."""
+ service = MetaServerService()
+ mock_resource = _make_mock_resource(
+ uri="resource://test/guide",
+ name="guide",
+ text_content="# Guide\nThis is the content.",
+ )
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.first.return_value = mock_resource
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("read_resource", {"uri": "resource://test/guide"}))
+
+ assert result["uri"] == "resource://test/guide"
+ assert result["name"] == "guide"
+ assert "Guide" in result["text"]
+
+ def test_read_resource_not_found(self):
+ """Test read_resource with unknown URI."""
+ service = MetaServerService()
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.first.return_value = None
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("read_resource", {"uri": "resource://not/found"}))
+
+ assert result["uri"] == "resource://not/found"
+ assert "not found" in result["text"].lower()
+
+ def test_read_resource_empty_uri(self):
+ """Test read_resource with empty URI returns error."""
+ service = MetaServerService()
+ result = asyncio.run(service.handle_meta_tool_call("read_resource", {"uri": ""}))
+ assert "required" in result["text"].lower()
+
+
+class TestListPromptsMetaTool:
+ """Tests for list_prompts meta-tool."""
+
+ def test_list_prompts_returns_results(self):
+ """Test that list_prompts returns prompts from DB."""
+ service = MetaServerService()
+ mock_prompts = [
+ _make_mock_prompt(name="summarize", description="Summarize text", tags=["utility"]),
+ _make_mock_prompt(name="translate", description="Translate text", tags=["language"]),
+ ]
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = mock_prompts
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_prompts", {}))
+
+ assert result["totalCount"] == 2
+ assert len(result["prompts"]) == 2
+ assert result["prompts"][0]["name"] == "summarize"
+
+ def test_list_prompts_empty(self):
+ """Test list_prompts with no results."""
+ service = MetaServerService()
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.order_by.return_value = mock_query
+ mock_query.all.return_value = []
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("list_prompts", {}))
+
+ assert result["totalCount"] == 0
+ assert result["prompts"] == []
+
+
+class TestGetPromptMetaTool:
+ """Tests for get_prompt meta-tool."""
+
+ def test_get_prompt_returns_template(self):
+ """Test that get_prompt returns prompt template."""
+ service = MetaServerService()
+ mock_prompt = _make_mock_prompt(name="greet", template="Hello {name}!")
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.first.return_value = mock_prompt
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("get_prompt", {"name": "greet"}))
+
+ assert result["name"] == "greet"
+ assert result["template"] == "Hello {name}!"
+ assert result["rendered"] is None
+
+ def test_get_prompt_with_rendering(self):
+ """Test that get_prompt renders template with arguments."""
+ service = MetaServerService()
+ mock_prompt = _make_mock_prompt(name="greet", template="Hello {name}!")
+ mock_prompt.validate_arguments = MagicMock()
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.first.return_value = mock_prompt
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("get_prompt", {
+ "name": "greet",
+ "arguments": {"name": "World"},
+ }))
+
+ assert result["name"] == "greet"
+ assert result["rendered"] == "Hello World!"
+
+ def test_get_prompt_not_found(self):
+ """Test get_prompt with unknown name."""
+ service = MetaServerService()
+
+ mock_db = MagicMock()
+ mock_query = MagicMock()
+ mock_query.filter.return_value = mock_query
+ mock_query.first.return_value = None
+ mock_db.query.return_value = mock_query
+
+ def mock_get_db():
+ yield mock_db
+
+ with patch("mcpgateway.meta_server.service.get_db", mock_get_db):
+ result = asyncio.run(service.handle_meta_tool_call("get_prompt", {"name": "nonexistent"}))
+
+ assert result["name"] == "nonexistent"
+ assert "not found" in result["description"].lower()
+
+ def test_get_prompt_empty_name(self):
+ """Test get_prompt with empty name returns error."""
+ service = MetaServerService()
+ result = asyncio.run(service.handle_meta_tool_call("get_prompt", {"name": ""}))
+ assert "required" in result["description"].lower()
+
+
+class TestMetaToolDefinitionsIncludeNewTools:
+ """Test that META_TOOL_DEFINITIONS includes the 4 new meta-tools."""
+
+ def test_list_resources_in_definitions(self):
+ assert "list_resources" in META_TOOL_DEFINITIONS
+
+ def test_read_resource_in_definitions(self):
+ assert "read_resource" in META_TOOL_DEFINITIONS
+
+ def test_list_prompts_in_definitions(self):
+ assert "list_prompts" in META_TOOL_DEFINITIONS
+
+ def test_get_prompt_in_definitions(self):
+ assert "get_prompt" in META_TOOL_DEFINITIONS
+
+ def test_total_meta_tools_is_11(self):
+ assert len(META_TOOL_DEFINITIONS) == 11
+
+ def test_service_returns_11_definitions(self):
+ service = MetaServerService()
+ defs = service.get_meta_tool_definitions()
+ assert len(defs) == 11
+ names = {d["name"] for d in defs}
+ assert "list_resources" in names
+ assert "read_resource" in names
+ assert "list_prompts" in names
+        assert "get_prompt" in names
\ No newline at end of file
From 646b16a1fa6752f648b557f376f3e56bb985a5ec Mon Sep 17 00:00:00 2001
From: Olivier Gintrand
Date: Thu, 16 Apr 2026 10:11:58 +0200
Subject: [PATCH 02/12] fix: enforce access control on all meta-server handlers
All 12 meta-server tool handlers now enforce visibility-based access
control using user_email and token_teams from the JWT context.
The previously fixed handlers (execute_tool, authorize_gateway, authorize_all_gateways)
already passed user context correctly. The following handlers were
bypassing access control entirely:
- _list_tools: now passes user_email/token_teams to ToolService.list_tools()
- _search_tools: replaced raw db.query(Tool) with ToolService.list_tools()
- _stub_describe_tool: passes user_email/token_teams to MetaToolService
- _get_similar_tools: filters vector search results against accessible
tool set via ToolService.list_tools()
- _list_resources: applies ResourceService._apply_access_control() to query
- _read_resource: checks ResourceService._check_resource_access() before
returning content
- _list_prompts: applies PromptService._apply_access_control() to query
- _get_prompt: checks PromptService._check_prompt_access() before
returning content
Adds _extract_user_context() static helper for consistent extraction
of (user_email, token_teams, request_headers) from handler kwargs.
_get_tool_categories remains hardcoded to actor_scope=public_user which
is already restrictive by default.
Signed-off-by: Olivier Gintrand
---
mcpgateway/meta_server/service.py | 149 ++++++++++++++++++++++++++----
1 file changed, 130 insertions(+), 19 deletions(-)
diff --git a/mcpgateway/meta_server/service.py b/mcpgateway/meta_server/service.py
index a00a5d50e1..83cd4507b8 100644
--- a/mcpgateway/meta_server/service.py
+++ b/mcpgateway/meta_server/service.py
@@ -264,6 +264,24 @@ def _to_snake(key: str) -> str:
return {_to_snake(k): v for k, v in arguments.items()}
+ @staticmethod
+ def _extract_user_context(kwargs: Dict[str, Any]) -> tuple:
+ """Extract access-control parameters from handler kwargs.
+
+ ``handle_meta_tool_call`` passes ``user_email``, ``token_teams``,
+ and ``request_headers`` through to every handler. This helper
+ provides a single extraction point so handlers don't repeat the
+ pattern.
+
+ Returns:
+ Tuple of (user_email, token_teams, request_headers).
+ """
+ return (
+ kwargs.get("user_email"),
+ kwargs.get("token_teams"),
+ kwargs.get("request_headers"),
+ )
+
# ------------------------------------------------------------------
# Implemented handlers
# ------------------------------------------------------------------
@@ -297,6 +315,9 @@ async def _search_tools(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[
tags = arguments.get("tags", [])
include_metrics = arguments.get("include_metrics", False)
+ # -- Extract user context for access control --
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
# -- Step 1: Semantic search --
semantic_results = []
try:
@@ -309,39 +330,44 @@ async def _search_tools(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[
# -- Step 2: Keyword fallback search --
keyword_results = []
try:
+ from mcpgateway.services.tool_service import ToolService as _KwToolService # pylint: disable=import-outside-toplevel
+ _kw_ts = _KwToolService()
+
db_gen = get_db()
db = next(db_gen)
try:
- search_pattern = f"%{query}%"
- keyword_tools = (
- db.query(Tool)
- .filter(
- Tool.enabled.is_(True),
- or_(
- Tool._computed_name.ilike(search_pattern),
- Tool.description.ilike(search_pattern),
- ),
- )
- .limit(limit)
- .all()
+ # Use ToolService.list_tools for consistent access control
+ kw_result = await _kw_ts.list_tools(
+ db=db,
+ include_inactive=False,
+ limit=limit,
+ user_email=user_email,
+ token_teams=token_teams,
)
+ kw_tools_list, _ = kw_result if isinstance(kw_result, tuple) else (kw_result, None)
query_lower = query.lower()
- for tool in keyword_tools:
+ search_pattern = query_lower
+ for tool in kw_tools_list:
+ tool_name = getattr(tool, "name", "")
+ tool_desc = getattr(tool, "description", "") or ""
+ # Filter to only matching tools
+ if search_pattern not in tool_name.lower() and search_pattern not in tool_desc.lower():
+ continue
# Score 1.0 for exact name match, 0.5 for partial match
- if tool._computed_name.lower() == query_lower:
+ if tool_name.lower() == query_lower:
score = 1.0
- elif query_lower in tool.name.lower():
+ elif query_lower in tool_name.lower():
score = 0.7
else:
score = 0.5
keyword_results.append(
ToolSearchResult(
- tool_name=tool.name,
- description=tool.description,
- server_id=tool.gateway_id,
- server_name=tool.gateway.name if tool.gateway else None,
+ tool_name=tool_name,
+ description=tool_desc,
+ server_id=getattr(tool, "gateway_id", None),
+ server_name=None,
similarity_score=score,
)
)
@@ -414,6 +440,9 @@ async def _get_similar_tools(self, arguments: Dict[str, Any], **kwargs: Any) ->
tool_name = arguments.get("tool_name", "")
limit = arguments.get("limit", 10)
+ # -- Extract user context for access control --
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
if not tool_name:
return GetSimilarToolsResponse(
reference_tool=tool_name,
@@ -501,6 +530,34 @@ async def _get_similar_tools(self, arguments: Dict[str, Any], **kwargs: Any) ->
# -- Step 4: Filter out the reference tool itself --
similar_results = [r for r in similar_results if r.tool_name != tool_name][:limit]
+ # -- Step 4.5: Apply access-control filtering --
+ # Build set of tool names the user can access, then discard the rest.
+ if user_email is not None or token_teams is not None:
+ try:
+ from mcpgateway.services.tool_service import ToolService as _AcToolService # pylint: disable=import-outside-toplevel
+
+ _ac_ts = _AcToolService()
+ db_gen = get_db()
+ db = next(db_gen)
+ try:
+ ac_result = await _ac_ts.list_tools(
+ db=db,
+ include_inactive=False,
+ limit=0,
+ user_email=user_email,
+ token_teams=token_teams,
+ )
+ ac_tools_list, _ = ac_result if isinstance(ac_result, tuple) else (ac_result, None)
+ accessible_names = {getattr(t, "name", "") for t in ac_tools_list}
+ similar_results = [r for r in similar_results if r.tool_name in accessible_names]
+ finally:
+ try:
+ next(db_gen)
+ except StopIteration:
+ pass
+ except Exception as e:
+ logger.warning(f"Access control filtering failed for similar tools: {e}")
+
# -- Step 5: Apply scope filtering --
filtered_results = self._apply_scope_filtering(similar_results, arguments.get("scope"))
@@ -738,6 +795,9 @@ async def _list_tools(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[st
sort_order = arguments.get("sort_order", "desc")
include_schema = arguments.get("include_schema", False)
+ # -- Extract user context for access control --
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
# -- Step 1: Query tools from database using ToolService --
# First-Party
from mcpgateway.services.tool_service import ToolService
@@ -759,6 +819,8 @@ async def _list_tools(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[st
tags=tags if tags else None,
gateway_id=server_id,
limit=query_limit,
+ user_email=user_email,
+ token_teams=token_teams,
)
# Extract tools from result (could be tuple or dict)
@@ -838,6 +900,8 @@ async def _stub_describe_tool(self, arguments: Dict[str, Any], **kwargs: Any) ->
# First-Party
from mcpgateway.services.meta_tool_service import MetaToolService
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
try:
db_gen = get_db()
db = next(db_gen)
@@ -847,6 +911,8 @@ async def _stub_describe_tool(self, arguments: Dict[str, Any], **kwargs: Any) ->
tool_name=arguments.get("tool_name", ""),
include_metrics=arguments.get("include_metrics", False),
scope=arguments.get("scope"),
+ user_email=user_email,
+ token_teams=token_teams,
)
return result.model_dump(by_alias=True)
finally:
@@ -1278,18 +1344,26 @@ async def _list_resources(self, arguments: Dict[str, Any], **kwargs: Any) -> Dic
ListResourcesResponse as dict.
"""
from mcpgateway.db import Resource # pylint: disable=import-outside-toplevel
+ from mcpgateway.services.resource_service import ResourceService as _RsService # pylint: disable=import-outside-toplevel
limit = arguments.get("limit", 50)
offset = arguments.get("offset", 0)
tags = arguments.get("tags", [])
mime_type = arguments.get("mime_type")
+ # Extract user context for access control
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
try:
db_gen = get_db()
db = next(db_gen)
try:
query = db.query(Resource).filter(Resource.enabled.is_(True))
+ # Apply access control
+ _rs = _RsService()
+ query = await _rs._apply_access_control(query, db, user_email, token_teams)
+
if mime_type:
query = query.filter(Resource.mime_type == mime_type)
@@ -1347,6 +1421,7 @@ async def _read_resource(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict
"""
from mcpgateway.db import Resource # pylint: disable=import-outside-toplevel
from mcpgateway.services.observability_service import ObservabilityService, current_trace_id # pylint: disable=import-outside-toplevel
+ from mcpgateway.services.resource_service import ResourceService as _RsService # pylint: disable=import-outside-toplevel
uri = arguments.get("uri", "")
if not uri:
@@ -1356,6 +1431,9 @@ async def _read_resource(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict
text="Error: uri is required",
).model_dump(by_alias=True)
+ # Extract user context for access control
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
start_time = time.monotonic()
success = False
error_message = None
@@ -1400,6 +1478,16 @@ async def _read_resource(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict
text=error_message,
).model_dump(by_alias=True)
+ # Check access control
+ _rs = _RsService()
+ if not await _rs._check_resource_access(db, resource, user_email, token_teams):
+ error_message = f"Resource not found: {uri}"
+ return ReadResourceResponse(
+ uri=uri,
+ name="",
+ text=error_message,
+ ).model_dump(by_alias=True)
+
text_content = resource.text_content
if text_content is None and resource.binary_content is not None:
text_content = "(binary content — not displayable as text)"
@@ -1457,16 +1545,25 @@ async def _list_prompts(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[
ListPromptsResponse as dict.
"""
from mcpgateway.db import Prompt # pylint: disable=import-outside-toplevel
+ from mcpgateway.services.prompt_service import PromptService as _PsService # pylint: disable=import-outside-toplevel
limit = arguments.get("limit", 50)
offset = arguments.get("offset", 0)
tags = arguments.get("tags", [])
+ # Extract user context for access control
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
try:
db_gen = get_db()
db = next(db_gen)
try:
query = db.query(Prompt).filter(Prompt.enabled.is_(True))
+
+ # Apply access control
+ _ps = _PsService()
+ query = await _ps._apply_access_control(query, db, user_email, token_teams)
+
all_prompts = query.order_by(Prompt.created_at.desc()).all()
# Apply tag filtering in Python (tags stored as JSON)
@@ -1519,6 +1616,7 @@ async def _get_prompt(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[st
"""
from mcpgateway.db import Prompt # pylint: disable=import-outside-toplevel
from mcpgateway.services.observability_service import ObservabilityService, current_trace_id # pylint: disable=import-outside-toplevel
+ from mcpgateway.services.prompt_service import PromptService as _PsService # pylint: disable=import-outside-toplevel
name = arguments.get("name", "")
prompt_args = arguments.get("arguments", {})
@@ -1530,6 +1628,9 @@ async def _get_prompt(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[st
description="Error: name is required",
).model_dump(by_alias=True)
+ # Extract user context for access control
+ user_email, token_teams, _ = self._extract_user_context(kwargs)
+
start_time = time.monotonic()
success = False
error_message = None
@@ -1575,6 +1676,16 @@ async def _get_prompt(self, arguments: Dict[str, Any], **kwargs: Any) -> Dict[st
description=error_message,
).model_dump(by_alias=True)
+ # Check access control
+ _ps = _PsService()
+ if not await _ps._check_prompt_access(db, prompt, user_email, token_teams):
+ error_message = f"Prompt not found: {name}"
+ return GetPromptResponse(
+ name=name,
+ template="",
+ description=error_message,
+ ).model_dump(by_alias=True)
+
rendered = None
if prompt_args:
try:
From 9be6a19b5a8e5aae239c7b4126529ada0fe21c0c Mon Sep 17 00:00:00 2001
From: Olivier Gintrand
Date: Thu, 16 Apr 2026 11:02:40 +0200
Subject: [PATCH 03/12] ui: hide tool/resource/prompt selectors when
meta-server mode is enabled
Move the meta-server checkbox above the tool/resource/prompt selection
sections in both the create and edit virtual server modals. When
meta-server mode is enabled, the selectors are hidden and replaced
with an info banner explaining that a meta-server dynamically exposes
all items based on the user's access rights.
Changes:
- admin.html: move meta-server checkbox before tool selection in both
create and edit modals; wrap tools/resources/prompts sections in
togglable container divs; add info banner
- servers.js: toggle items section and info banner when populating
the edit modal based on server_type
Signed-off-by: Olivier Gintrand
---
mcpgateway/admin_ui/servers.js | 18 +++
mcpgateway/templates/admin.html | 207 +++++++++++++++++++-------------
2 files changed, 141 insertions(+), 84 deletions(-)
diff --git a/mcpgateway/admin_ui/servers.js b/mcpgateway/admin_ui/servers.js
index c06450c1b3..c95d5eefa9 100644
--- a/mcpgateway/admin_ui/servers.js
+++ b/mcpgateway/admin_ui/servers.js
@@ -887,6 +887,24 @@ export const editServer = async function (serverId) {
hideUnderlyingToolsCheckbox.checked = isMeta ? hideTools : true;
}
+ // Toggle items section and info banner based on meta-server mode
+ const editItemsSection = safeGetElement("edit-server-items-section");
+ const editMetaInfoBanner = safeGetElement("edit-meta-info-banner");
+ if (editItemsSection) {
+ if (isMeta) {
+ editItemsSection.classList.add("hidden");
+ } else {
+ editItemsSection.classList.remove("hidden");
+ }
+ }
+ if (editMetaInfoBanner) {
+ if (isMeta) {
+ editMetaInfoBanner.classList.remove("hidden");
+ } else {
+ editMetaInfoBanner.classList.add("hidden");
+ }
+ }
+
// Store server data for modal population
window.Admin.currentEditingServer = server;
diff --git a/mcpgateway/templates/admin.html b/mcpgateway/templates/admin.html
index 393e111983..af0152e57a 100644
--- a/mcpgateway/templates/admin.html
+++ b/mcpgateway/templates/admin.html
@@ -2957,6 +2957,65 @@
>
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the 6 meta-tools, not the individual backend tools.
+
+
+
+
+
+
+
+
+
+
+
Meta-Server mode enabled
+
+ Tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
+ the user has access to based on their team membership and visibility rules.
+
+
+
+
+
+
@@ -3175,6 +3234,7 @@
+
-
-
-
-
-
-
-
- When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
-
-
-
-
-
-
-
-
- Clients will only see the 6 meta-tools, not the individual backend tools.
-
-
-
-
-
+
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the 6 meta-tools, not the individual backend tools.
+
+
+
+
+
+
+
+
+
+
+
Meta-Server mode enabled
+
+ Tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
+ the user has access to based on their team membership and visibility rules.
+
+
+
+
+
+
+
+
@@ -11566,6 +11646,7 @@
+
@@ -11643,48 +11724,6 @@
-
-
-
-
-
-
-
- When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
-
-
-
-
-
-
-
-
- Clients will only see the 6 meta-tools, not the individual backend tools.
-
-
-
-
-
From b32c8d15086f7eba4c102df670e62c1d9c35ea70 Mon Sep 17 00:00:00 2001
From: Olivier Gintrand
Date: Thu, 16 Apr 2026 11:35:15 +0200
Subject: [PATCH 04/12] ui: also hide MCP server selector when meta-server mode
is enabled
Signed-off-by: Olivier Gintrand
---
mcpgateway/admin_ui/servers.js | 10 +-
mcpgateway/templates/admin.html | 731 ++++++++++++++++++++++++++------
2 files changed, 606 insertions(+), 135 deletions(-)
diff --git a/mcpgateway/admin_ui/servers.js b/mcpgateway/admin_ui/servers.js
index c95d5eefa9..d99354d50c 100644
--- a/mcpgateway/admin_ui/servers.js
+++ b/mcpgateway/admin_ui/servers.js
@@ -887,14 +887,14 @@ export const editServer = async function (serverId) {
hideUnderlyingToolsCheckbox.checked = isMeta ? hideTools : true;
}
- // Toggle items section and info banner based on meta-server mode
- const editItemsSection = safeGetElement("edit-server-items-section");
+ // Toggle gateways+tools wrapper and info banner based on meta-server mode
+ const editGatewaysAndTools = safeGetElement("edit-server-gateways-and-tools");
const editMetaInfoBanner = safeGetElement("edit-meta-info-banner");
- if (editItemsSection) {
+ if (editGatewaysAndTools) {
if (isMeta) {
- editItemsSection.classList.add("hidden");
+ editGatewaysAndTools.classList.add("hidden");
} else {
- editItemsSection.classList.remove("hidden");
+ editGatewaysAndTools.classList.remove("hidden");
}
}
if (editMetaInfoBanner) {
diff --git a/mcpgateway/templates/admin.html b/mcpgateway/templates/admin.html
index af0152e57a..6d1184d636 100644
--- a/mcpgateway/templates/admin.html
+++ b/mcpgateway/templates/admin.html
@@ -2884,8 +2884,68 @@
+ When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the meta-tools, not the individual backend tools.
+
+
+
+
+
+
+
+
+
+
+
Meta-Server mode enabled
+
+ MCP server, tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
+ the user has access to based on their team membership and visibility rules.
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
- When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
-
-
-
-
-
-
-
-
- Clients will only see the 6 meta-tools, not the individual backend tools.
-
-
-
-
-
-
-
-
-
-
-
Meta-Server mode enabled
-
- Tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
- the user has access to based on their team membership and visibility rules.
-
+ Pre-register OAuth client credentials to bypass Dynamic Client Registration (RFC 7591).
+ Required for IdPs like Microsoft Entra ID that do not support DCR.
+
+
+
+
+
+ OAuth client ID registered with the Identity Provider
+
+
+
+
+
+
+ OAuth client secret (stored encrypted). Leave blank if using public client (PKCE only).
+
+
+
+
+
+
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the meta-tools, not the individual backend tools.
+
+
@@ -4162,6 +4255,12 @@
tools.
+
+
+
+
+
Team that owns this tool.
+
+
+
+
+
+
Team that owns this resource.
+
+
+
+
+
+
Team that owns this prompt.
+
+
+
+
+
+
Team that owns this gateway.
+
+
+
+
+
+
+ How client credentials are sent to the token endpoint (RFC 6749 Section 2.3)
+
+
+
+ Glob patterns for tools to include (comma-separated, e.g.,
+ "manage-ticket*, manage-task*"). Only matching tools will be
+ imported. Leave empty to include all.
+
+
+
+
+
+
+ Glob patterns for tools to exclude (comma-separated, e.g.,
+ "manage-project*"). Matching tools will be skipped. Leave
+ empty to exclude none.
+
+
+
@@ -7575,6 +7745,12 @@
+
+
+
+
+
+
Team that owns this agent.
@@ -8996,6 +9172,13 @@
filter tools.
+
+
+
+
+
Team that owns this tool.
+
+
+
+
+
+
Team that owns this resource.
+
+
+
+
+
+
Team that owns this prompt.
+
+
+
+
+
+ 🔑 Store Personal Credential
+
+
+ Store a personal API key, token, or credential for gateway:
+
+
+
+
+
+
+
+
+
+
+
The credential is encrypted at rest and only used when you invoke tools on this gateway.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
automatically normalized.
+
+
+
+
+
Team that owns this gateway.
+
@@ -10403,6 +10656,25 @@
read:user")
+
+
+
+
+
+ How client credentials are sent to the token endpoint (RFC 6749 Section 2.3)
+
+
+
+ Glob patterns for tools to include (comma-separated,
+ e.g., "manage-ticket*, manage-task*"). Only matching
+ tools will be imported. Leave empty to include all.
+
+
+
+
+
+
+ Glob patterns for tools to exclude (comma-separated,
+ e.g., "manage-project*"). Matching tools will be
+ skipped. Leave empty to exclude none.
+
+
+
+
+
+
+
+
+
Team that owns this agent.
@@ -11217,6 +11536,13 @@
filter servers.
+
+
+
+
+
Team that owns this server.
+
@@ -11293,8 +11619,69 @@
{% endif %}
+
+
+
+
+
+
+
+
+ When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
+
+
+
+
+
+
+
+
+ Clients will only see the meta-tools, not the individual backend tools.
+
+
+
+
+
+
+
+
+
+
+
Meta-Server mode enabled
+
+ MCP server, tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
+ the user has access to based on their team membership and visibility rules.
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
- When enabled, this server exposes meta-tools (search, list, describe, execute) instead of individual underlying tools.
-
-
-
-
-
-
-
-
- Clients will only see the 6 meta-tools, not the individual backend tools.
-
-
-
-
-
-
-
-
-
-
-
Meta-Server mode enabled
-
- Tool, resource, and prompt selection is not needed. A meta-server dynamically exposes all items
- the user has access to based on their team membership and visibility rules.
-
-
-
-
-
+
-
-
@@ -11647,6 +11974,8 @@
+
+
@@ -11721,6 +12050,91 @@
Leave blank to use standard discovery from authorization server
+
+
+ MCP OAuth Proxy (DCR Bypass)
+
+
+ Pre-register OAuth client credentials to bypass Dynamic Client Registration (RFC 7591).
+ Required for IdPs like Microsoft Entra ID that do not support DCR.
+
+
+
+
+
+ OAuth client ID registered with the Identity Provider
+