|
| 1 | +<!doctype html> |
| 2 | +<html lang="en-US"> |
| 3 | + <head> |
| 4 | + <link href="/assets/index.css" rel="stylesheet" type="text/css" /> |
| 5 | + <script crossorigin="anonymous" src="https://unpkg.com/@babel/standalone@7.8.7/babel.min.js"></script> |
| 6 | + <script crossorigin="anonymous" src="https://unpkg.com/react@16.8.6/umd/react.production.min.js"></script> |
| 7 | + <script crossorigin="anonymous" src="https://unpkg.com/react-dom@16.8.6/umd/react-dom.production.min.js"></script> |
| 8 | + <script crossorigin="anonymous" src="/test-harness.js"></script> |
| 9 | + <script crossorigin="anonymous" src="/test-page-object.js"></script> |
| 10 | + <script crossorigin="anonymous" src="/__dist__/webchat-es5.js"></script> |
| 11 | + <script crossorigin="anonymous" src="/__dist__/botframework-webchat-fluent-theme.production.min.js"></script> |
| 12 | + </head> |
| 13 | + <body> |
| 14 | + <main id="webchat"></main> |
| 15 | + <!-- |
| 16 | + Test: Mute/Unmute functionality for Speech-to-Speech |
| 17 | + |
| 18 | + This test validates: |
| 19 | + 1. Listening state can transition to muted and back to listening |
| 20 | + 2. Other states (idle) cannot transition to muted |
| 21 | + 3. Muted chunks contain all zeros (silent audio) |
| 22 | + 4. Uses useVoiceRecordingMuted hook via Composer pattern for mute/unmute control |
| 23 | + --> |
    <script type="module">
      // Install the mock media-device and audio-playback implementations
      // (presumably replacing getUserMedia / audio output — see the imported
      // modules) BEFORE Web Chat initializes, so the speech-to-speech
      // pipeline runs without real hardware. This runs as an ES module and
      // therefore executes before the deferred text/babel script below.
      import { setupMockMediaDevices } from '/assets/esm/speechToSpeech/mockMediaDevices.js';
      import { setupMockAudioPlayback } from '/assets/esm/speechToSpeech/mockAudioPlayback.js';

      setupMockMediaDevices();
      setupMockAudioPlayback();
    </script>
| 31 | + <script type="text/babel"> |
      run(async function () {
        // Pull React, ReactDOM and the Web Chat surface off the UMD globals
        // loaded in <head>. useVoiceRecordingMuted is the hook under test;
        // Composer/BasicWebChat let us mount a probe component inside the
        // same Web Chat context as the chat UI.
        const {
          React: { useEffect },
          ReactDOM: { render },
          WebChat: {
            FluentThemeProvider,
            testIds,
            hooks: { useVoiceRecordingMuted },
            Components: { Composer, BasicWebChat }
          }
        } = window;
| 43 | + |
| 44 | + // Helper to decode base64 audio and check if all zeros |
| 45 | + function isAudioAllZeros(base64Content) { |
| 46 | + const binaryString = atob(base64Content); |
| 47 | + const bytes = new Uint8Array(binaryString.length); |
| 48 | + for (let i = 0; i < binaryString.length; i++) { |
| 49 | + bytes[i] = binaryString.charCodeAt(i); |
| 50 | + } |
| 51 | + return bytes.every(byte => byte === 0); |
| 52 | + } |
| 53 | + |
| 54 | + // Helper to check if audio has non-zero data (real audio) |
| 55 | + function hasNonZeroAudio(base64Content) { |
| 56 | + const binaryString = atob(base64Content); |
| 57 | + const bytes = new Uint8Array(binaryString.length); |
| 58 | + for (let i = 0; i < binaryString.length; i++) { |
| 59 | + bytes[i] = binaryString.charCodeAt(i); |
| 60 | + } |
| 61 | + return bytes.some(byte => byte !== 0); |
| 62 | + } |
| 63 | + |
        // Every outgoing "media.chunk" event activity is recorded here along
        // with the voice state observed at the moment it was posted, so later
        // phases can assert on chunks captured per state.
        const audioChunks = [];
        let currentVoiceState = 'idle';

        // Setup Web Chat with Speech-to-Speech: the emulated Direct Line
        // advertises a voice configuration (24 kHz, 100 ms chunks) without
        // emitting an event for it.
        const { directLine, store } = testHelpers.createDirectLineEmulator();
        directLine.setCapability('getVoiceConfiguration', { sampleRate: 24000, chunkIntervalMs: 100 }, { emitEvent: false });

        // Track voiceState changes; falls back to 'idle' while the voice
        // slice of the store is absent.
        store.subscribe(() => {
          currentVoiceState = store.getState().voice?.voiceState || 'idle';
        });

        // Intercept postActivity to capture outgoing voice chunks. Only
        // "media.chunk" events are recorded; every activity is still
        // forwarded to the original implementation unchanged.
        const originalPostActivity = directLine.postActivity.bind(directLine);
        directLine.postActivity = activity => {
          if (activity.name === 'media.chunk' && activity.type === 'event') {
            audioChunks.push({
              content: activity.value?.content,
              voiceState: currentVoiceState
            });
          }
          return originalPostActivity(activity);
        };
| 87 | + |
        // Bridge object exposing the mute hook's current value and setter
        // outside the React tree, so the imperative test steps below can
        // drive and observe mute state. Populated by MuteController, which
        // must be mounted inside the Composer context.
        let muteControlRef = { setMuted: null, muted: false };

        // Renders nothing; exists only to call useVoiceRecordingMuted within
        // Web Chat's context and mirror its state into muteControlRef.
        const MuteController = () => {
          const [muted, setMuted] = useVoiceRecordingMuted();

          useEffect(() => {
            muteControlRef.setMuted = setMuted;
          }, [setMuted]);

          // Update muted on every render to capture latest value (a render-
          // phase write, deliberate so reads never lag behind the hook).
          muteControlRef.muted = muted;

          return false;
        };
| 103 | + |
        // Helper to get voice state from store (undefined until the voice
        // slice exists).
        const getVoiceState = () => store.getState().voice?.voiceState;

        // Mount Web Chat plus the MuteController probe inside one Composer so
        // both share the same Web Chat context.
        render(
          <FluentThemeProvider variant="fluent">
            <Composer directLine={directLine} store={store}>
              <BasicWebChat />
              <MuteController />
            </Composer>
          </FluentThemeProvider>,
          document.getElementById('webchat')
        );

        await pageConditions.uiConnected();

        const micButton = document.querySelector(`[data-testid="${testIds.sendBoxMicrophoneButton}"]`);
        expect(micButton).toBeTruthy();

        // ===== TEST 1: Muting from idle state should be no-op =====
        expect(getVoiceState()).toBe('idle');
        expect(muteControlRef.muted).toBe(false);

        muteControlRef.setMuted(true);
        // NOTE(review): fixed 100 ms wait because this asserts a negative
        // (no state transition) — pageConditions.became cannot wait for
        // "nothing happened".
        await new Promise(r => setTimeout(r, 100));

        expect(getVoiceState()).toBe('idle'); // Still idle, not muted
        expect(muteControlRef.muted).toBe(false);

        // ===== TEST 2: Start recording → listening state =====
        await host.click(micButton);

        await pageConditions.became(
          'Voice state is listening',
          () => getVoiceState() === 'listening',
          2000
        );

        // Wait for some listening chunks
        await pageConditions.became(
          'At least 2 listening chunks received',
          () => audioChunks.filter(c => c.voiceState === 'listening').length >= 2,
          2000
        );

        // ===== TEST 3: Mute from listening state → muted state =====
        muteControlRef.setMuted(true);

        await pageConditions.became(
          'Voice state is muted',
          () => getVoiceState() === 'muted',
          1000
        );

        expect(muteControlRef.muted).toBe(true);

        // Wait for muted chunks — chunks keep flowing while muted; they are
        // expected to be silent, which TEST 4 verifies.
        await pageConditions.became(
          'At least 2 muted chunks received',
          () => audioChunks.filter(c => c.voiceState === 'muted').length >= 2,
          2000
        );

        // ===== TEST 4: Verify muted chunks are all zeros =====
        const mutedChunks = audioChunks.filter(c => c.voiceState === 'muted');
        expect(mutedChunks.length).toBeGreaterThanOrEqual(2);
        for (const chunk of mutedChunks) {
          expect(isAudioAllZeros(chunk.content)).toBe(true);
        }

        // ===== TEST 5: Unmute → back to listening state =====
        muteControlRef.setMuted(false);

        await pageConditions.became(
          'Voice state is listening after unmute',
          () => getVoiceState() === 'listening',
          1000
        );

        expect(muteControlRef.muted).toBe(false);

        // Wait for more chunks after unmute (strictly more than one new
        // chunk, so TEST 6 has post-unmute data to inspect).
        const chunksBeforeCheck = audioChunks.length;
        await pageConditions.became(
          'New chunks received after unmute',
          () => audioChunks.length > chunksBeforeCheck + 1,
          2000
        );

        // ===== TEST 6: Verify listening chunks contain real (non-zero) audio =====
        const listeningChunks = audioChunks.filter(c => c.voiceState === 'listening');
        expect(listeningChunks.length).toBeGreaterThanOrEqual(4); // At least 2 before mute + 2 after unmute

        // Verify listening audio is non-zero (real audio)
        for (const chunk of listeningChunks) {
          expect(hasNonZeroAudio(chunk.content)).toBe(true);
        }

        // ===== TEST 7: Stop recording ===== (mic button toggles; muted flag
        // must reset to false once back at idle)
        await host.click(micButton);

        await pageConditions.became(
          'Voice state is idle after stop',
          () => getVoiceState() === 'idle',
          2000
        );

        expect(muteControlRef.muted).toBe(false);
      });
| 212 | + </script> |
| 213 | + </body> |
| 214 | +</html> |
0 commit comments