-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Expand file tree
/
Copy path createCognitiveServicesSpeechServicesPonyfillFactory.ts
More file actions
83 lines (76 loc) · 3.13 KB
/
createCognitiveServicesSpeechServicesPonyfillFactory.ts
File metadata and controls
83 lines (76 loc) · 3.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import { WebSpeechPonyfillFactory } from 'botframework-webchat-api';
import { AudioConfig } from 'microsoft-cognitiveservices-speech-sdk';
import { createSpeechServicesPonyfill } from 'web-speech-cognitive-services';
import createMicrophoneAudioConfigAndAudioContext from './speech/createMicrophoneAudioConfigAndAudioContext';
import removeBearerInAuthorizationToken from './speech/removeBearerInAuthorizationToken';
import CognitiveServicesAudioOutputFormat from './types/CognitiveServicesAudioOutputFormat';
import CognitiveServicesCredentials from './types/CognitiveServicesCredentials';
import CognitiveServicesTextNormalization from './types/CognitiveServicesTextNormalization';
export default function createCognitiveServicesSpeechServicesPonyfillFactory({
  audioConfig,
  audioContext,
  audioInputDeviceId,
  credentials,
  enableTelemetry,
  initialSilenceTimeout,
  speechRecognitionEndpointId,
  speechSynthesisDeploymentId,
  speechSynthesisOutputFormat,
  textNormalization
}: {
  audioConfig?: AudioConfig;
  audioContext?: AudioContext;
  audioInputDeviceId?: string;
  credentials: CognitiveServicesCredentials;
  enableTelemetry?: true;
  initialSilenceTimeout?: number | undefined;
  speechRecognitionEndpointId?: string;
  speechSynthesisDeploymentId?: string;
  speechSynthesisOutputFormat?: CognitiveServicesAudioOutputFormat;
  textNormalization?: CognitiveServicesTextNormalization;
}): WebSpeechPonyfillFactory {
  // Without media device access (e.g. page not served over HTTPS/localhost) and
  // without a caller-supplied AudioConfig, speech cannot work: hand back a
  // factory that produces an empty ponyfill.
  if (!window.navigator.mediaDevices && !audioConfig) {
    console.warn(
      'botframework-webchat: Your browser does not support Web Audio or the page is not loaded via HTTPS or localhost. Cognitive Services Speech Services is disabled. However, you may pass a custom AudioConfig to enable speech in this environment.'
    );

    return () => ({});
  }

  // Effective audio config/context handed to the Speech SDK. A caller-supplied
  // "audioConfig" takes precedence over "audioInputDeviceId"/"audioContext";
  // otherwise a microphone-backed config is created here.
  let activeAudioConfig = audioConfig;
  let activeAudioContext = audioContext;

  if (audioConfig) {
    if (audioInputDeviceId) {
      console.warn(
        'botframework-webchat: "audioConfig" and "audioInputDeviceId" cannot be set at the same time; ignoring "audioInputDeviceId".'
      );
    }

    if (audioContext) {
      console.warn(
        'botframework-webchat: "audioConfig" and "audioContext" cannot be set at the same time; ignoring "audioContext" for speech recognition.'
      );
    }
  } else {
    ({ audioConfig: activeAudioConfig, audioContext: activeAudioContext } = createMicrophoneAudioConfigAndAudioContext({
      audioContext,
      audioInputDeviceId,
      enableTelemetry
    }));
  }

  return ({ referenceGrammarID } = {}) => {
    const ponyfill = createSpeechServicesPonyfill({
      audioConfig: activeAudioConfig,
      audioContext: activeAudioContext,
      // The Speech SDK expects a raw token; strip any "Bearer " prefix.
      credentials: removeBearerInAuthorizationToken(credentials),
      enableTelemetry,
      initialSilenceTimeout,
      referenceGrammars: referenceGrammarID ? [`luis/${referenceGrammarID}-PRODUCTION`] : [],
      speechRecognitionEndpointId,
      speechSynthesisDeploymentId,
      speechSynthesisOutputFormat,
      textNormalization
    });

    return {
      // Browsers keep an AudioContext suspended until a user gesture; expose a
      // hook so the host can resume it on demand.
      resumeAudioContext: () =>
        activeAudioContext && activeAudioContext.state === 'suspended' && activeAudioContext.resume(),
      SpeechGrammarList: ponyfill.SpeechGrammarList,
      SpeechRecognition: ponyfill.SpeechRecognition,
      speechSynthesis: ponyfill.speechSynthesis,
      SpeechSynthesisUtterance: ponyfill.SpeechSynthesisUtterance
    };
  };
}