-
Notifications
You must be signed in to change notification settings - Fork 192
Expand file tree
/
Copy pathtest_tool_recent_activity.py
More file actions
110 lines (90 loc) · 3.49 KB
/
test_tool_recent_activity.py
File metadata and controls
110 lines (90 loc) · 3.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
"""Tests for discussion context MCP tool."""
import pytest
from mcp.server.fastmcp.exceptions import ToolError
from basic_memory.mcp.tools import recent_activity
from basic_memory.schemas.memory import (
EntitySummary,
ObservationSummary,
RelationSummary,
)
from basic_memory.schemas.search import SearchItemType
# Test data for different timeframe formats accepted/rejected by recent_activity.
# These timeframes are expected to parse successfully.
valid_timeframes: list[str] = [
    "7d",  # Standard format
    "yesterday",  # Natural language
    "0d",  # Zero duration
]
# These timeframes are expected to be rejected with a ToolError.
invalid_timeframes: list[str] = [
    "invalid",  # Nonsense string
    "tomorrow",  # Future date
]
@pytest.mark.asyncio
async def test_recent_activity_timeframe_formats(client, test_graph):
    """Test that recent_activity accepts various timeframe formats.

    Every entry in ``valid_timeframes`` must be accepted, and every entry in
    ``invalid_timeframes`` must be rejected with a ToolError.
    """
    # Valid timeframes: call directly and let any unexpected exception
    # propagate, so pytest reports the real traceback instead of a
    # wrapped pytest.fail message that hides the failure site.
    for timeframe in valid_timeframes:
        result = await recent_activity(
            type=["entity"], timeframe=timeframe, page=1, page_size=10, max_related=10
        )
        assert result is not None, f"No result for valid timeframe '{timeframe}'"
    # Invalid timeframes must surface as ToolError from the tool layer.
    for timeframe in invalid_timeframes:
        with pytest.raises(ToolError):
            await recent_activity(timeframe=timeframe)
@pytest.mark.asyncio
async def test_recent_activity_type_filters(client, test_graph):
    """Test that recent_activity correctly filters results by item type.

    The ``type`` argument may be a single enum value, a single string, or a
    list mixing either form; primary results must only contain the summary
    classes corresponding to the requested types.
    """
    # (type argument to pass, summary classes every primary result may be).
    # Covers: bare enum, bare string, single-element list, multi-type lists
    # in both string and enum form, and all three types at once.
    cases = [
        (SearchItemType.ENTITY, (EntitySummary,)),
        ("entity", (EntitySummary,)),
        (["entity"], (EntitySummary,)),
        (["entity", "observation"], (EntitySummary, ObservationSummary)),
        (
            [SearchItemType.ENTITY, SearchItemType.OBSERVATION],
            (EntitySummary, ObservationSummary),
        ),
        (
            ["entity", "observation", "relation"],
            (EntitySummary, ObservationSummary, RelationSummary),
        ),
    ]
    for type_arg, allowed_types in cases:
        result = await recent_activity(type=type_arg)
        assert result is not None
        # isinstance accepts a tuple of classes — one check per result.
        assert all(
            isinstance(r, allowed_types) for r in result.primary_results
        ), f"Unexpected result type for type={type_arg!r}"
@pytest.mark.asyncio
async def test_recent_activity_type_invalid(client, test_graph):
    """Test that recent_activity rejects unknown type values.

    Both a bare invalid string and a list containing an invalid string must
    raise ValueError with the same message listing the valid types.
    """
    expected = (
        "Invalid type: note. Valid types are: ['entity', 'observation', 'relation']"
    )
    # The bare-string and list forms must be rejected identically.
    for type_arg in ("note", ["note"]):
        with pytest.raises(ValueError) as e:
            await recent_activity(type=type_arg)
        assert str(e.value) == expected