"""
Regression test for issue #213: asyncio.Lock not released on client disconnect.

The lock in ContextWebSocket.execute() must only be held while sending the
request to the Jupyter kernel, NOT during the entire streaming phase. Holding
it during streaming means a client disconnect leaves the lock held until the
kernel finishes — blocking all subsequent executions on the same context.

This test mocks the Jupyter WebSocket so no real kernel is needed.
"""

import asyncio
import sys
import os
import unittest
from unittest.mock import AsyncMock

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from messaging import ContextWebSocket
from api.models.output import NumberOfExecutions
| 23 | + |
class TestLockRelease(unittest.IsolatedAsyncioTestCase):
    """Regression tests for issue #213: the per-context lock must not span
    the result-streaming phase of ContextWebSocket.execute()."""

    async def test_lock_not_held_during_streaming(self):
        """Lock must be released before streaming results, not after."""
        ws = ContextWebSocket("test-ctx", "test-session", "python", "/home/user")

        # Mock the WebSocket so we don't need a real Jupyter kernel.
        ws._ws = AsyncMock()
        ws._ws.send = AsyncMock()

        # Pre-set global env vars so execute() doesn't call get_envs().
        ws._global_env_vars = {}

        # Start the execute() generator.
        gen = ws.execute("print('hello')", env_vars={}, access_token="")

        # Drive the generator: it acquires the lock, sends the request, then
        # enters _wait_for_result which blocks on queue.get(). We feed an item
        # into the queue so the generator yields it back to us.
        pull_task = asyncio.create_task(gen.__anext__())

        try:
            # Poll until execute() has registered the execution, instead of a
            # single fixed sleep — a fixed 0.1s is flaky under load.
            loop = asyncio.get_running_loop()
            deadline = loop.time() + 2.0
            while not ws._executions:
                self.assertLess(
                    loop.time(),
                    deadline,
                    "execute() never registered the execution",
                )
                await asyncio.sleep(0.01)

            # Feed an item into the execution queue so _wait_for_result yields.
            self.assertEqual(len(ws._executions), 1)
            message_id = next(iter(ws._executions))
            await ws._executions[message_id].queue.put(
                NumberOfExecutions(execution_count=1)
            )

            # Get the yielded item — generator is now suspended at the next
            # queue.get() inside _wait_for_result (the streaming phase).
            await asyncio.wait_for(pull_task, timeout=2.0)

            # THE KEY ASSERTION: the lock must NOT be held during streaming.
            # Without fix: lock held for entire execute() -> locked() -> FAIL
            # With fix: lock released after send -> locked() is False -> PASS
            self.assertFalse(
                ws._lock.locked(),
                "Lock is held during result streaming — issue #213: if the "
                "client disconnects now, the lock stays held and blocks all "
                "subsequent executions on this context",
            )
        finally:
            # Cleanup even when an assertion above fails: drop the pending
            # pull (no-op if already done) and close the generator properly
            # so no task/generator is leaked into test teardown.
            pull_task.cancel()
            await gen.aclose()
| 72 | + |
| 73 | + |
# Allow running this test file directly (e.g. `python test_lock_release.py`)
# in addition to discovery via a test runner.
if __name__ == "__main__":
    unittest.main()
0 commit comments