@@ -250,19 +250,6 @@ async def on_user_transcript(event: RealtimeUserSpeechTranscriptionEvent):
250250 )
251251 await realtime .simple_audio_response (pcm , test_participant )
252252
253- # Simulate OpenAI creating the conversation item (this is when we map item_id -> participant)
254- item_created_event = {
255- "type" : "conversation.item.created" ,
256- "event_id" : "event_created_123" ,
257- "item" : {
258- "id" : "item_test_456" ,
259- "type" : "message" ,
260- "role" : "user" ,
261- "content" : [],
262- },
263- }
264- await realtime ._handle_openai_event (item_created_event )
265-
266253 # Now simulate receiving the transcription event from OpenAI
267254 openai_event = {
268255 "content_index" : 0 ,
@@ -286,95 +273,3 @@ async def on_user_transcript(event: RealtimeUserSpeechTranscriptionEvent):
286273
287274 # Verify the user_id() helper method works
288275 assert user_transcripts [0 ].user_id () == "test_user_123"
289-
290- async def test_multi_user_participant_tracking (self , realtime ):
291- """Test that participant tracking works correctly when multiple users speak in succession"""
292- user_transcripts = []
293-
294- @realtime .events .subscribe
295- async def on_user_transcript (event : RealtimeUserSpeechTranscriptionEvent ):
296- user_transcripts .append (event )
297-
298- from vision_agents .core .edge .types import Participant
299- from getstream .video .rtc .track_util import PcmData , AudioFormat
300- import numpy as np
301-
302- # User A sends audio
303- participant_a = Participant (original = None , user_id = "user_a" )
304- pcm_a = PcmData (
305- samples = np .zeros (100 , dtype = np .int16 ),
306- sample_rate = 48000 ,
307- format = AudioFormat .S16 ,
308- )
309- await realtime .simple_audio_response (pcm_a , participant_a )
310-
311- # OpenAI creates conversation item for User A
312- item_created_a = {
313- "type" : "conversation.item.created" ,
314- "event_id" : "event_created_a" ,
315- "item" : {
316- "id" : "item_a_123" ,
317- "type" : "message" ,
318- "role" : "user" ,
319- "content" : [],
320- },
321- }
322- await realtime ._handle_openai_event (item_created_a )
323-
324- # User B sends audio (before A's transcription arrives)
325- participant_b = Participant (original = None , user_id = "user_b" )
326- pcm_b = PcmData (
327- samples = np .zeros (100 , dtype = np .int16 ),
328- sample_rate = 48000 ,
329- format = AudioFormat .S16 ,
330- )
331- await realtime .simple_audio_response (pcm_b , participant_b )
332-
333- # OpenAI creates conversation item for User B
334- item_created_b = {
335- "type" : "conversation.item.created" ,
336- "event_id" : "event_created_b" ,
337- "item" : {
338- "id" : "item_b_456" ,
339- "type" : "message" ,
340- "role" : "user" ,
341- "content" : [],
342- },
343- }
344- await realtime ._handle_openai_event (item_created_b )
345-
346- # Now transcriptions arrive (A's transcription arrives AFTER B started speaking)
347- transcription_a = {
348- "content_index" : 0 ,
349- "event_id" : "event_trans_a" ,
350- "item_id" : "item_a_123" , # References User A's item
351- "transcript" : "Hello from User A" ,
352- "type" : "conversation.item.input_audio_transcription.completed" ,
353- "usage" : {"seconds" : 1 , "type" : "duration" },
354- }
355- await realtime ._handle_openai_event (transcription_a )
356-
357- transcription_b = {
358- "content_index" : 0 ,
359- "event_id" : "event_trans_b" ,
360- "item_id" : "item_b_456" , # References User B's item
361- "transcript" : "Hello from User B" ,
362- "type" : "conversation.item.input_audio_transcription.completed" ,
363- "usage" : {"seconds" : 1 , "type" : "duration" },
364- }
365- await realtime ._handle_openai_event (transcription_b )
366-
367- await asyncio .sleep (0.1 )
368-
369- # Verify both transcriptions are attributed to the correct users
370- assert len (user_transcripts ) == 2
371-
372- # User A's transcription should be attributed to User A (not B, despite B speaking more recently)
373- assert user_transcripts [0 ].text == "Hello from User A"
374- assert user_transcripts [0 ].participant is not None
375- assert user_transcripts [0 ].participant .user_id == "user_a"
376-
377- # User B's transcription should be attributed to User B
378- assert user_transcripts [1 ].text == "Hello from User B"
379- assert user_transcripts [1 ].participant is not None
380- assert user_transcripts [1 ].participant .user_id == "user_b"