@@ -28,7 +28,37 @@ import asyncio
2828from copilot import CopilotClient, PermissionHandler
2929
3030async def main ():
31- # Create and start client
31+ # Client automatically starts on enter and cleans up on exit
32+ async with CopilotClient() as client:
33+ # Create a session with automatic cleanup
34+ async with await client.create_session({" model" : " gpt-5" }) as session:
35+ # Wait for response using session.idle event
36+ done = asyncio.Event()
37+
38+ def on_event (event ):
39+ if event.type.value == " assistant.message" :
40+ print (event.data.content)
41+ elif event.type.value == " session.idle" :
42+ done.set()
43+
44+ session.on(on_event)
45+
46+ # Send a message and wait for completion
47+ await session.send(" What is 2+2?" )
48+ await done.wait()
49+
50+ asyncio.run(main())
51+ ```
52+
53+ ### Manual Resource Management
54+
55+ If you need more control over the lifecycle, you can call `start()`, `stop()`, and `disconnect()` manually:
56+
57+ ``` python
58+ import asyncio
59+ from copilot import CopilotClient
60+
61+ async def main ():
3262 client = CopilotClient()
3363 await client.start()
3464
@@ -38,7 +68,6 @@ async def main():
3868 " on_permission_request" : PermissionHandler.approve_all,
3969 })
4070
41- # Wait for response using session.idle event
4271 done = asyncio.Event()
4372
4473 def on_event (event ):
@@ -48,29 +77,16 @@ async def main():
4877 done.set()
4978
5079 session.on(on_event)
51-
52- # Send a message and wait for completion
5380 await session.send(" What is 2+2?" )
5481 await done.wait()
5582
56- # Clean up
83+ # Clean up manually
5784 await session.disconnect()
5885 await client.stop()
5986
6087asyncio.run(main())
6188```
6289
63- Sessions also support the ` async with ` context manager pattern for automatic cleanup:
64-
65- ``` python
66- async with await client.create_session({
67- " model" : " gpt-5" ,
68- " on_permission_request" : PermissionHandler.approve_all,
69- }) as session:
70- await session.send(" What is 2+2?" )
71- # session is automatically disconnected when leaving the block
72- ```
73-
7490## Features
7591
7692- ✅ Full JSON-RPC protocol support
@@ -79,6 +95,7 @@ async with await client.create_session({
7995- ✅ Session history with `get_messages()`
8096- ✅ Type hints throughout
8197- ✅ Async/await native
98+ - ✅ Async context manager support for automatic resource cleanup
8299
83100## API Reference
84101
@@ -87,24 +104,19 @@ async with await client.create_session({
87104``` python
88105from copilot import CopilotClient, SubprocessConfig
89106
90- # Spawn a local CLI process (default)
91- client = CopilotClient() # uses bundled CLI, stdio transport
92- await client.start()
93-
94- session = await client.create_session({" model" : " gpt-5" })
107+ async with CopilotClient() as client:
108+ async with await client.create_session({" model" : " gpt-5" }) as session:
109+ def on_event (event ):
110+ print (f " Event: { event[' type' ]} " )
95111
96- def on_event ( event ):
97- print ( f " Event: { event[ ' type ' ] } " )
112+ session.on(on_event)
113+ await session.send( " Hello! " )
98114
99- session.on(on_event)
100- await session.send(" Hello!" )
101-
102- # ... wait for events ...
103-
104- await session.disconnect()
105- await client.stop()
115+ # ... wait for events ...
106116```
107117
118+ > **Note:** For manual lifecycle management, see [Manual Resource Management](#manual-resource-management) above.
119+
108120``` python
109121from copilot import CopilotClient, ExternalServerConfig
110122
@@ -199,10 +211,11 @@ async def lookup_issue(params: LookupIssueParams) -> str:
199211 issue = await fetch_issue(params.id)
200212 return issue.summary
201213
202- session = await client.create_session({
214+ async with await client.create_session({
203215 " model" : " gpt-5" ,
204216 " tools" : [lookup_issue],
205- })
217+ }) as session:
218+ ...
206219```
207220
208221> **Note:** When using `from __future__ import annotations`, define Pydantic models at module level (not inside functions).
@@ -224,7 +237,7 @@ async def lookup_issue(invocation):
224237 " sessionLog" : f " Fetched issue { issue_id} " ,
225238 }
226239
227- session = await client.create_session({
240+ async with await client.create_session({
228241 " model" : " gpt-5" ,
229242 " tools" : [
230243 Tool(
@@ -240,7 +253,8 @@ session = await client.create_session({
240253 handler = lookup_issue,
241254 )
242255 ],
243- })
256+ }) as session:
257+ ...
244258```
245259
246260The SDK automatically handles `tool.call`, executes your handler (sync or async), and responds with the final result when the tool completes.
@@ -313,44 +327,38 @@ import asyncio
313327from copilot import CopilotClient
314328
315329async def main ():
316- client = CopilotClient()
317- await client.start()
318-
319- session = await client.create_session({
320- " model" : " gpt-5" ,
321- " streaming" : True
322- })
323-
324- # Use asyncio.Event to wait for completion
325- done = asyncio.Event()
326-
327- def on_event (event ):
328- if event.type.value == " assistant.message_delta" :
329- # Streaming message chunk - print incrementally
330- delta = event.data.delta_content or " "
331- print (delta, end = " " , flush = True )
332- elif event.type.value == " assistant.reasoning_delta" :
333- # Streaming reasoning chunk (if model supports reasoning)
334- delta = event.data.delta_content or " "
335- print (delta, end = " " , flush = True )
336- elif event.type.value == " assistant.message" :
337- # Final message - complete content
338- print (" \n --- Final message ---" )
339- print (event.data.content)
340- elif event.type.value == " assistant.reasoning" :
341- # Final reasoning content (if model supports reasoning)
342- print (" --- Reasoning ---" )
343- print (event.data.content)
344- elif event.type.value == " session.idle" :
345- # Session finished processing
346- done.set()
347-
348- session.on(on_event)
349- await session.send(" Tell me a short story" )
350- await done.wait() # Wait for streaming to complete
351-
352- await session.disconnect()
353- await client.stop()
330+ async with CopilotClient() as client:
331+ async with await client.create_session({
332+ " model" : " gpt-5" ,
333+ " streaming" : True ,
334+ }) as session:
335+ # Use asyncio.Event to wait for completion
336+ done = asyncio.Event()
337+
338+ def on_event (event ):
339+ if event.type.value == " assistant.message_delta" :
340+ # Streaming message chunk - print incrementally
341+ delta = event.data.delta_content or " "
342+ print (delta, end = " " , flush = True )
343+ elif event.type.value == " assistant.reasoning_delta" :
344+ # Streaming reasoning chunk (if model supports reasoning)
345+ delta = event.data.delta_content or " "
346+ print (delta, end = " " , flush = True )
347+ elif event.type.value == " assistant.message" :
348+ # Final message - complete content
349+ print (" \n --- Final message ---" )
350+ print (event.data.content)
351+ elif event.type.value == " assistant.reasoning" :
352+ # Final reasoning content (if model supports reasoning)
353+ print (" --- Reasoning ---" )
354+ print (event.data.content)
355+ elif event.type.value == " session.idle" :
356+ # Session finished processing
357+ done.set()
358+
359+ session.on(on_event)
360+ await session.send(" Tell me a short story" )
361+ await done.wait() # Wait for streaming to complete
354362
355363asyncio.run(main())
356364```
@@ -370,27 +378,28 @@ By default, sessions use **infinite sessions** which automatically manage contex
370378
371379``` python
372380# Default: infinite sessions enabled with default thresholds
373- session = await client.create_session({" model" : " gpt-5" })
374-
375- # Access the workspace path for checkpoints and files
376- print (session.workspace_path)
377- # => ~/.copilot/session-state/{session_id}/
381+ async with await client.create_session({" model" : " gpt-5" }) as session:
382+ # Access the workspace path for checkpoints and files
383+ print (session.workspace_path)
384+ # => ~/.copilot/session-state/{session_id}/
378385
379386# Custom thresholds
380- session = await client.create_session({
387+ async with await client.create_session({
381388 " model" : " gpt-5" ,
382389 " infinite_sessions" : {
383390 " enabled" : True ,
384391 " background_compaction_threshold" : 0.80 , # Start compacting at 80% context usage
385392 " buffer_exhaustion_threshold" : 0.95 , # Block at 95% until compaction completes
386393 },
387- })
394+ }) as session:
395+ ...
388396
389397# Disable infinite sessions
390- session = await client.create_session({
398+ async with await client.create_session({
391399 " model" : " gpt-5" ,
392400 " infinite_sessions" : {" enabled" : False },
393- })
401+ }) as session:
402+ ...
394403```
395404
396405When enabled, sessions emit compaction events:
@@ -414,39 +423,39 @@ The SDK supports custom OpenAI-compatible API providers (BYOK - Bring Your Own K
414423** Example with Ollama:**
415424
416425``` python
417- session = await client.create_session({
426+ async with await client.create_session({
418427 " model" : " deepseek-coder-v2:16b" , # Required when using custom provider
419428 " provider" : {
420429 " type" : " openai" ,
421430 " base_url" : " http://localhost:11434/v1" , # Ollama endpoint
422431 # api_key not required for Ollama
423432 },
424- })
425-
426- await session.send(" Hello!" )
433+ }) as session:
434+ await session.send(" Hello!" )
427435```
428436
429437** Example with custom OpenAI-compatible API:**
430438
431439``` python
432440import os
433441
434- session = await client.create_session({
442+ async with await client.create_session({
435443 " model" : " gpt-4" ,
436444 " provider" : {
437445 " type" : " openai" ,
438446 " base_url" : " https://my-api.example.com/v1" ,
439447 " api_key" : os.environ[" MY_API_KEY" ],
440448 },
441- })
449+ }) as session:
450+ ...
442451```
443452
444453** Example with Azure OpenAI:**
445454
446455``` python
447456import os
448457
449- session = await client.create_session({
458+ async with await client.create_session({
450459 " model" : " gpt-4" ,
451460 " provider" : {
452461 " type" : " azure" , # Must be "azure" for Azure endpoints, NOT "openai"
@@ -456,7 +465,8 @@ session = await client.create_session({
456465 " api_version" : " 2024-10-21" ,
457466 },
458467 },
459- })
468+ }) as session:
469+ ...
460470```
461471
462472> **Important notes:**
@@ -595,10 +605,11 @@ async def handle_user_input(request, invocation):
595605 " wasFreeform" : True , # Whether the answer was freeform (not from choices)
596606 }
597607
598- session = await client.create_session({
608+ async with await client.create_session({
599609 " model" : " gpt-5" ,
600610 " on_user_input_request" : handle_user_input,
601- })
611+ }) as session:
612+ ...
602613```
603614
604615## Session Hooks
@@ -642,7 +653,7 @@ async def on_error_occurred(input, invocation):
642653 " errorHandling" : " retry" , # "retry", "skip", or "abort"
643654 }
644655
645- session = await client.create_session({
656+ async with await client.create_session({
646657 " model" : " gpt-5" ,
647658 " hooks" : {
648659 " on_pre_tool_use" : on_pre_tool_use,
@@ -652,7 +663,8 @@ session = await client.create_session({
652663 " on_session_end" : on_session_end,
653664 " on_error_occurred" : on_error_occurred,
654665 },
655- })
666+ }) as session:
667+ ...
656668```
657669
658670** Available hooks:**
0 commit comments