|
| 1 | +"""Integration tests for AsyncShallowRedisSaver.adelete_thread.""" |
| 2 | + |
| 3 | +from __future__ import annotations |
| 4 | + |
| 5 | +from typing import Any, Dict, List, Tuple |
| 6 | + |
| 7 | +import pytest |
| 8 | +from langchain_core.runnables import RunnableConfig |
| 9 | +from langgraph.checkpoint.base import ( |
| 10 | + WRITES_IDX_MAP, |
| 11 | + CheckpointMetadata, |
| 12 | + create_checkpoint, |
| 13 | + empty_checkpoint, |
| 14 | +) |
| 15 | + |
| 16 | +from langgraph.checkpoint.redis.ashallow import AsyncShallowRedisSaver |
| 17 | +from langgraph.checkpoint.redis.util import to_storage_safe_id, to_storage_safe_str |
| 18 | + |
| 19 | + |
def _expected_write_keys(
    *,
    saver: AsyncShallowRedisSaver,
    thread_id: str,
    checkpoint_ns: str,
    checkpoint_id: str,
    task_id: str,
    writes: List[Tuple[str, Any]],
) -> List[str]:
    """Return the exact Redis keys that aput_writes will create for *writes*.

    Special channels map through WRITES_IDX_MAP; every other channel falls
    back to its positional index in the writes sequence.
    """
    return [
        saver._make_redis_checkpoint_writes_key_cached(  # noqa: SLF001
            thread_id,
            checkpoint_ns,
            checkpoint_id,
            task_id,
            WRITES_IDX_MAP.get(channel, position),
        )
        for position, (channel, _unused_value) in enumerate(writes)
    ]
| 39 | + |
| 40 | + |
@pytest.mark.asyncio
async def test_adelete_thread_cleans_shallow_checkpoints_writes_and_registry(
    redis_url: str, async_client
) -> None:
    """adelete_thread must remove every checkpoint, write, and write-registry
    key belonging to a thread (across all namespaces) while leaving data for
    other threads untouched."""
    thread_id = "test-ashallow-adelete-thread"
    other_thread_id = "test-ashallow-adelete-thread-other"

    # Root namespace plus a nested one, mimicking subgraph usage in shallow mode.
    namespaces = ["", "inner"]

    async with AsyncShallowRedisSaver.from_conn_string(redis_url) as saver:
        per_ns: Dict[str, Dict[str, Any]] = {}

        for ns in namespaces:
            cfg: RunnableConfig = {
                "configurable": {
                    "thread_id": thread_id,
                    "checkpoint_ns": ns,
                }
            }
            ckpt = create_checkpoint(empty_checkpoint(), {}, 1)
            meta: CheckpointMetadata = {"source": "input", "step": 1, "writes": {}}

            stored_cfg = await saver.aput(cfg, ckpt, meta, {})
            ckpt_id = stored_cfg["configurable"]["checkpoint_id"]

            # Produce a couple of pending writes and remember the keys we
            # expect them to land under.
            pending = [("channel1", "value1"), ("channel2", "value2")]
            task = f"task-{ns or 'root'}"
            await saver.aput_writes(stored_cfg, pending, task)

            ckpt_key = saver._make_shallow_redis_checkpoint_key_cached(  # noqa: SLF001
                thread_id, ns
            )
            registry_key = (
                f"write_keys_zset:{to_storage_safe_id(thread_id)}:"
                f"{to_storage_safe_str(ns)}:shallow"
            )
            expected_write_keys = _expected_write_keys(
                saver=saver,
                thread_id=thread_id,
                checkpoint_ns=ns,
                checkpoint_id=ckpt_id,
                task_id=task,
                writes=pending,
            )

            per_ns[ns] = {
                "saved_config": stored_cfg,
                "checkpoint_key": ckpt_key,
                "zset_key": registry_key,
                "write_keys": expected_write_keys,
            }

        # A second thread acts as the control group: it must survive deletion.
        control_cfg: RunnableConfig = {
            "configurable": {"thread_id": other_thread_id, "checkpoint_ns": ""}
        }
        control_ckpt = create_checkpoint(empty_checkpoint(), {}, 1)
        control_saved = await saver.aput(
            control_cfg,
            control_ckpt,
            {"source": "input", "step": 1, "writes": {}},
            {},
        )
        control_key = saver._make_shallow_redis_checkpoint_key_cached(  # noqa: SLF001
            other_thread_id, ""
        )

        # Pre-delete sanity: everything exists. Direct key checks sidestep
        # any search-index lag.
        assert await async_client.exists(control_key) == 1
        assert await saver.aget_tuple(control_saved) is not None

        for ns in namespaces:
            entry = per_ns[ns]

            assert await async_client.exists(entry["checkpoint_key"]) == 1
            assert await async_client.exists(entry["zset_key"]) == 1
            assert await async_client.zcard(entry["zset_key"]) == len(
                entry["write_keys"]
            )

            for write_key in entry["write_keys"]:
                assert await async_client.exists(write_key) == 1

        # Wipe the target thread.
        await saver.adelete_thread(thread_id)

        # Control thread must be untouched.
        assert await async_client.exists(control_key) == 1
        assert await saver.aget_tuple(control_saved) is not None

        # Every key belonging to the deleted thread must be gone.
        for ns in namespaces:
            entry = per_ns[ns]

            assert await saver.aget_tuple(entry["saved_config"]) is None
            assert await async_client.exists(entry["checkpoint_key"]) == 0
            assert await async_client.exists(entry["zset_key"]) == 0

            for write_key in entry["write_keys"]:
                assert await async_client.exists(write_key) == 0