forked from Expensify/App
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathSequentialQueue.ts
More file actions
615 lines (550 loc) · 25.3 KB
/
SequentialQueue.ts
File metadata and controls
615 lines (550 loc) · 25.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
import type {OnyxKey, OnyxUpdate} from 'react-native-onyx';
import Onyx from 'react-native-onyx';
import {setIsOpenAppFailureModalOpen} from '@libs/actions/isOpenAppFailureModalOpen';
import {endSpan, startSpan} from '@libs/telemetry/activeSpans';
import {
deleteRequestsByIndices as deletePersistedRequestsByIndices,
endRequestAndRemoveFromQueue as endPersistedRequestAndRemoveFromQueue,
getAll as getAllPersistedRequests,
getCommands,
onInitialization as onPersistedRequestsInitialization,
processNextRequest as processNextPersistedRequest,
rollbackOngoingRequest as rollbackOngoingPersistedRequest,
save as savePersistedRequest,
update as updatePersistedRequest,
} from '@libs/actions/PersistedRequests';
import {flushQueue, isEmpty} from '@libs/actions/QueuedOnyxUpdates';
import {isClientTheLeader} from '@libs/ActiveClientManager';
import {WRITE_COMMANDS} from '@libs/API/types';
import Log from '@libs/Log';
import {processWithMiddleware} from '@libs/Request';
import RequestThrottle from '@libs/RequestThrottle';
import CONST from '@src/CONST';
import ONYXKEYS from '@src/ONYXKEYS';
import type OnyxRequest from '@src/types/onyx/Request';
import type {AnyRequest, ConflictData} from '@src/types/onyx/Request';
import {isOffline, onReconnection} from './NetworkStore';
// Mirrors NETWORK.shouldFailAllRequests from Onyx. When true, failed requests are not retried and
// their failure/finally data is applied immediately (see the catch handler in process()).
let shouldFailAllRequests: boolean;
// Use connectWithoutView since this is for network data and don't affect to any UI
Onyx.connectWithoutView({
    key: ONYXKEYS.NETWORK,
    callback: (network) => {
        // Ignore the initial undefined/null emission before NETWORK is populated.
        if (!network) {
            return;
        }
        shouldFailAllRequests = !!network.shouldFailAllRequests;
    },
});
// Shape of errors surfaced by processWithMiddleware. Fields are optional because
// network-layer errors are not guaranteed to carry all of them.
type RequestError = Error & {
    name?: string;
    message?: string;
    status?: string;
};
// isReadyPromise resolves once the queue has drained all persisted WRITE requests; READ requests
// wait on it (see waitForIdle and push). resolveIsReadyPromise is the resolver of the CURRENT
// promise instance — flush() may replace both (see its shouldResetPromise parameter).
let resolveIsReadyPromise: ((args?: unknown[]) => void) | undefined;
let isReadyPromise = new Promise((resolve) => {
    resolveIsReadyPromise = resolve;
});
// Resolve the isReadyPromise immediately so that the queue starts working as soon as the page loads
resolveIsReadyPromise?.();
// True while flush() is draining the queue — prevents overlapping flushes.
let isSequentialQueueRunning = false;
// Promise for the request currently in flight, or null when idle (see getCurrentRequest).
let currentRequestPromise: Promise<void> | null = null;
// While true, no requests are processed until unpause() is called (set when the client must
// first catch up on missed server updates).
let isQueuePaused = false;
// Implements the exponential back-off delay between retries of a failing request.
const sequentialQueueRequestThrottle = new RequestThrottle('SequentialQueue');
/**
 * Puts the queue into a paused state so that no requests will be processed.
 * Safe to call repeatedly — pausing an already-paused queue is a logged no-op.
 */
function pause() {
    if (!isQueuePaused) {
        Log.info('[SequentialQueue] Pausing the queue');
        isQueuePaused = true;
        return;
    }
    Log.info('[SequentialQueue] Queue already paused');
}
/**
 * Gets the current Onyx queued updates, applies them and clears the queue — unless the queue is paused.
 *
 * @returns the promise from flushQueue(), or undefined when the queue is paused and nothing was flushed.
 */
function flushOnyxUpdatesQueue() {
    // The only situation where the queue is paused is if we found a gap between the app current data state and our server's. If that happens,
    // we'll trigger async calls to make the client updated again. While we do that, we don't want to insert anything in Onyx.
    if (isQueuePaused) {
        // Fixed log message: the old text ('Queue already paused') was copy-pasted from pause() and
        // misstated what happens here — we are declining to flush, not pausing.
        Log.info('[SequentialQueue] Unable to flush Onyx updates queue. Queue is paused.');
        return;
    }
    return flushQueue();
}
// In-memory mirror of ONYXKEYS.QUEUE_FLUSHED_DATA: Onyx updates produced by processed requests
// that must only be applied once the queue is fully drained (see flush()).
let queueFlushedDataToStore: Array<OnyxUpdate<OnyxKey>> = [];
// Use connectWithoutView since this is for network queue and don't affect to any UI
Onyx.connectWithoutView({
    key: ONYXKEYS.QUEUE_FLUSHED_DATA,
    callback: (val) => {
        // Ignore the initial empty emission so the default [] is kept instead of null/undefined.
        if (!val) {
            return;
        }
        queueFlushedDataToStore = val;
    },
});
/**
 * Appends the given Onyx updates to the persisted QUEUE_FLUSHED_DATA store so they survive
 * until the queue is drained and they can be applied.
 */
function saveQueueFlushedData(...onyxUpdates: Array<OnyxUpdate<OnyxKey>>) {
    // @ts-expect-error - will be solved in https://github.com/Expensify/App/issues/73830
    const newValue = [...queueFlushedDataToStore, ...onyxUpdates];
    // eslint-disable-next-line rulesdir/prefer-actions-set-data
    const persistPromise = Onyx.set(ONYXKEYS.QUEUE_FLUSHED_DATA, newValue);
    return persistPromise.then(() => Log.info('[SequentialQueue] QueueFlushedData has been stored.', false, {newValue}));
}
/**
 * Wipes the persisted QUEUE_FLUSHED_DATA store and empties the in-memory mirror.
 */
function clearQueueFlushedData() {
    // eslint-disable-next-line rulesdir/prefer-actions-set-data
    const clearPromise = Onyx.set(ONYXKEYS.QUEUE_FLUSHED_DATA, null);
    return clearPromise.then(() => {
        // Truncate in place so any existing references to the array also see it emptied.
        queueFlushedDataToStore.length = 0;
        Log.info('[SequentialQueue] QueueFlushedData has been cleared.');
    });
}
/**
 * Returns the queued-flush Onyx updates currently held in memory.
 * Note: this is the live module-level array, not a copy.
 */
function getQueueFlushedData() {
    return queueFlushedDataToStore;
}
/**
 * Process any persisted requests, when online, one at a time until the queue is empty.
 *
 * If a request fails due to some kind of network error, such as a request being throttled or when our backend is down, then we retry it with an exponential back off process until a response
 * is successfully returned. The first time a request fails we set a random, small, initial wait time. After waiting, we retry the request. If there are subsequent failures the request wait
 * time is doubled creating an exponential back off in the frequency of requests hitting the server. Since the initial wait time is random and it increases exponentially, the load of
 * requests to our backend is evenly distributed and it gradually decreases with time, which helps the servers catch up.
 *
 * @returns a promise that resolves once processing stops (queue paused, offline, queue empty, or no next request).
 */
function process(): Promise<void> {
    // When the queue is paused, return early. This prevents any new requests from happening. The queue will be flushed again when the queue is unpaused.
    if (isQueuePaused) {
        Log.info('[SequentialQueue] Unable to process. Queue is paused.');
        return Promise.resolve();
    }
    if (isOffline()) {
        Log.info('[SequentialQueue] Unable to process. We are offline.');
        return Promise.resolve();
    }
    const persistedRequests = getAllPersistedRequests();
    Log.info('[SequentialQueue] process() called', false, {
        persistedRequestsLength: persistedRequests.length,
        isSequentialQueueRunning,
    });
    if (persistedRequests.length === 0) {
        Log.info('[SequentialQueue] Unable to process. No requests to process.');
        return Promise.resolve();
    }
    // Marks the head request as ongoing in PersistedRequests and returns it (may be undefined).
    const requestToProcess = processNextPersistedRequest();
    if (!requestToProcess) {
        Log.info('[SequentialQueue] Unable to process. No next request to handle.');
        return Promise.resolve();
    }
    Log.info('[SequentialQueue] Starting to process request', false, {
        command: requestToProcess.command,
        isRollback: requestToProcess.isRollback ?? false,
        persistWhenOngoing: requestToProcess.persistWhenOngoing ?? false,
    });
    // One telemetry span per command so retries of the same command reuse the same span id.
    const commandSpanId = `${CONST.TELEMETRY.SPAN_SEQUENTIAL_QUEUE_COMMAND}_${requestToProcess.command}`;
    // Skip tracking PusherPing — it fires every 30s as a heartbeat and would spam telemetry
    const shouldTrackSpan = requestToProcess.command !== WRITE_COMMANDS.PUSHER_PING;
    if (shouldTrackSpan) {
        startSpan(commandSpanId, {
            name: CONST.TELEMETRY.SPAN_SEQUENTIAL_QUEUE_COMMAND,
            op: CONST.TELEMETRY.SPAN_SEQUENTIAL_QUEUE_COMMAND,
            attributes: {
                [CONST.TELEMETRY.ATTRIBUTE_COMMAND_NAME]: requestToProcess.command,
            },
        });
    }
    // Set the current request to a promise awaiting its processing so that getCurrentRequest can be used to take some action after the current request has processed.
    currentRequestPromise = processWithMiddleware(requestToProcess, true)
        .then((response) => {
            // Success path: remove the request, stash its queueFlushedData (if any), reset the
            // throttle, then recurse to handle the next request.
            Log.info('[SequentialQueue] Request processed successfully', false, {
                command: requestToProcess.command,
                shouldPauseQueue: response?.shouldPauseQueue ?? false,
                hasQueueFlushedData: !!requestToProcess.queueFlushedData,
            });
            // A response might indicate that the queue should be paused. This happens when a gap in onyx updates is detected between the client and the server and
            // that gap needs resolved before the queue can continue.
            if (response?.shouldPauseQueue) {
                Log.info("[SequentialQueue] Handled 'shouldPauseQueue' in response. Pausing the queue.");
                pause();
            }
            Log.info('[SequentialQueue] Removing persisted request because it was processed successfully.', false, {
                command: requestToProcess.command,
                remainingRequests: getAllPersistedRequests().length,
            });
            endSpan(commandSpanId);
            endPersistedRequestAndRemoveFromQueue(requestToProcess);
            if (requestToProcess.queueFlushedData) {
                Log.info('[SequentialQueue] Will store queueFlushedData.', false, {
                    command: requestToProcess.command,
                    queueFlushedDataLength: requestToProcess.queueFlushedData.length,
                });
                // NOTE(review): fire-and-forget — the next process() call does not wait for this write.
                saveQueueFlushedData(...requestToProcess.queueFlushedData);
            }
            sequentialQueueRequestThrottle.clear();
            Log.info('[SequentialQueue] Continuing to process next request');
            return process();
        })
        .catch((error: RequestError) => {
            Log.info('[SequentialQueue] Request failed with error', false, {
                command: requestToProcess.command,
                errorName: error.name ?? 'unknown',
                errorMessage: error.message ?? 'unknown',
                errorStatus: error.status ?? 'unknown',
                shouldFailAllRequests,
            });
            // On sign out we cancel any in flight requests from the user. Since that user is no longer signed in their requests should not be retried.
            // Duplicate records don't need to be retried as they just mean the record already exists on the server
            if (error.name === CONST.ERROR.REQUEST_CANCELLED || error.message === CONST.ERROR.DUPLICATE_RECORD || shouldFailAllRequests) {
                if (shouldFailAllRequests) {
                    const onyxUpdates = [...((requestToProcess.failureData ?? []) as never), ...((requestToProcess.finallyData ?? []) as never)] as Array<OnyxUpdate<OnyxKey>>;
                    Log.info('[SequentialQueue] Applying failure and finally data because shouldFailAllRequests', false, {
                        command: requestToProcess.command,
                        updatesCount: onyxUpdates.length,
                    });
                    Onyx.update(onyxUpdates);
                }
                Log.info("[SequentialQueue] Removing persisted request because it failed and doesn't need to be retried.", false, {
                    command: requestToProcess.command,
                    errorName: error.name,
                    errorMessage: error.message,
                });
                endSpan(commandSpanId);
                endPersistedRequestAndRemoveFromQueue(requestToProcess);
                sequentialQueueRequestThrottle.clear();
                return process();
            }
            // For rate limiting errors (429) on ResendValidateCode, don't retry to prevent spam
            if (error.message === CONST.ERROR.THROTTLED && requestToProcess.command === WRITE_COMMANDS.RESEND_VALIDATE_CODE) {
                Log.info('[SequentialQueue] RESEND_VALIDATE_CODE throttled, not retrying', false, {
                    command: requestToProcess.command,
                });
                Onyx.update(requestToProcess.failureData ?? []);
                endSpan(commandSpanId);
                endPersistedRequestAndRemoveFromQueue(requestToProcess);
                sequentialQueueRequestThrottle.clear();
                return process();
            }
            // Retry path: put the request back at the head of the queue, then wait out the
            // exponential back-off before recursing.
            Log.info('[SequentialQueue] Will retry request after rollback and throttle delay', false, {
                command: requestToProcess.command,
                errorMessage: error.message,
            });
            endSpan(commandSpanId);
            rollbackOngoingPersistedRequest();
            return sequentialQueueRequestThrottle
                .sleep(error, requestToProcess.command)
                .then(() => {
                    Log.info('[SequentialQueue] Throttle delay completed, retrying request', false, {
                        command: requestToProcess.command,
                    });
                    return process();
                })
                .catch(() => {
                    // sleep() rejected: the throttle's retry budget is exhausted. Give up on this
                    // request, apply its failureData, and continue with the rest of the queue.
                    Log.info('[SequentialQueue] Request failed too many times, giving up', false, {
                        command: requestToProcess.command,
                        errorMessage: error.message,
                    });
                    Onyx.update(requestToProcess.failureData ?? []);
                    endPersistedRequestAndRemoveFromQueue(requestToProcess);
                    sequentialQueueRequestThrottle.clear();
                    // A permanently failed OpenApp means the app could not bootstrap — surface a modal.
                    if (requestToProcess.command === WRITE_COMMANDS.OPEN_APP) {
                        setIsOpenAppFailureModalOpen(true);
                    }
                    return process();
                });
        });
    return currentRequestPromise;
}
/**
 * Kicks off processing of the persisted request queue (leader client only).
 *
 * @param shouldResetPromise Determines whether the isReadyPromise should be reset.
 * A READ request will wait until all the WRITE requests are done, using the isReadyPromise promise.
 * Resetting can cause unresolved READ requests to hang if tied to the old promise,
 * so some cases (e.g., unpausing) require skipping the reset to maintain proper behavior.
 */
function flush(shouldResetPromise = true) {
    // When the queue is paused, return early. This will keep any requests in the queue and they will get flushed again when the queue is unpaused
    if (isQueuePaused) {
        Log.info('[SequentialQueue] Unable to flush. Queue is paused.');
        return;
    }
    if (isSequentialQueueRunning) {
        Log.info('[SequentialQueue] Unable to flush. Queue is already running.');
        return;
    }
    const currentPersistedRequests = getAllPersistedRequests();
    const persistedRequestsLength = currentPersistedRequests.length;
    const hasOnyxUpdates = !isEmpty();
    Log.info('[SequentialQueue] flush() called', false, {
        shouldResetPromise,
        persistedRequestsLength,
        hasQueuedOnyxUpdates: hasOnyxUpdates,
        isClientTheLeader: isClientTheLeader(),
    });
    // Nothing to do: no persisted requests and no queued Onyx updates.
    if (persistedRequestsLength === 0 && !hasOnyxUpdates) {
        Log.info('[SequentialQueue] Unable to flush. No requests or queued Onyx updates to process.');
        return;
    }
    Log.info('[SequentialQueue] Checking if client is leader', false, {
        persistedRequestsLength,
        hasOnyxUpdates,
    });
    // ONYXKEYS.PERSISTED_REQUESTS is shared across clients, thus every client/tab will have a copy
    // It is very important to only process the queue from leader client otherwise requests will be duplicated.
    if (!isClientTheLeader()) {
        Log.info('[SequentialQueue] Unable to flush. Client is not the leader.', false, {
            persistedRequestsLength,
        });
        return;
    }
    Log.info('[SequentialQueue] Starting queue processing', false, {
        persistedRequestsLength,
        persistedCommands: getCommands(currentPersistedRequests),
    });
    isSequentialQueueRunning = true;
    if (shouldResetPromise) {
        // Reset the isReadyPromise so that the queue will be flushed as soon as the request is finished
        isReadyPromise = new Promise((resolve) => {
            resolveIsReadyPromise = resolve;
        });
    }
    // Ensure persistedRequests are read from storage before proceeding with the queue
    // Use connectWithoutView since this is for network queue and don't affect to any UI
    const connection = Onyx.connectWithoutView({
        key: ONYXKEYS.PERSISTED_REQUESTS,
        // We exceptionally opt out of reusing the connection here to avoid extra callback calls due to
        // an existing connection already made in PersistedRequests.ts.
        reuseConnection: false,
        callback: () => {
            Log.info('[SequentialQueue] PERSISTED_REQUESTS loaded, starting process()', false, {
                requestsLength: getAllPersistedRequests().length,
            });
            // One-shot subscription: disconnect immediately so this callback only fires once.
            Onyx.disconnect(connection);
            process().finally(() => {
                const remainingRequests = getAllPersistedRequests().length;
                Log.info('[SequentialQueue] Finished processing queue.', false, {
                    remainingRequests,
                    isOffline: isOffline(),
                    willResolvePromise: isOffline() || remainingRequests === 0,
                });
                isSequentialQueueRunning = false;
                // Only signal readiness when there is genuinely nothing left to do right now:
                // either the queue drained, or we went offline (it will flush again on reconnect).
                if (isOffline() || remainingRequests === 0) {
                    Log.info('[SequentialQueue] Resolving isReadyPromise', false, {
                        reason: isOffline() ? 'offline' : 'queue empty',
                    });
                    resolveIsReadyPromise?.();
                }
                currentRequestPromise = null;
                // The queue can be paused when we sync the data with backend so we should only update the Onyx data when the queue is empty
                if (remainingRequests === 0) {
                    Log.info('[SequentialQueue] Queue is empty, flushing Onyx updates');
                    // flushOnyxUpdatesQueue() returns undefined when paused — hence the optional chain.
                    flushOnyxUpdatesQueue()?.then(() => {
                        const queueFlushedData = getQueueFlushedData();
                        if (queueFlushedData.length === 0) {
                            Log.info('[SequentialQueue] No queueFlushedData to apply');
                            return;
                        }
                        Log.info('[SequentialQueue] Applying queueFlushedData', false, {
                            queueFlushedDataLength: queueFlushedData.length,
                        });
                        Onyx.update(queueFlushedData).then(() => {
                            Log.info('[SequentialQueue] QueueFlushedData has been applied and stored', false, {
                                queueFlushedDataLength: queueFlushedData.length,
                            });
                            clearQueueFlushedData();
                        });
                    });
                } else {
                    Log.info('[SequentialQueue] Queue still has requests, NOT flushing Onyx updates', false, {
                        remainingRequests,
                    });
                }
            });
        },
    });
}
/**
* Unpauses the queue and flushes all the requests that were in it or were added to it while paused
*/
function unpause() {
if (!isQueuePaused) {
Log.info('[SequentialQueue] Unable to unpause queue. We are already processing.');
return;
}
const currentPersistedRequests = getAllPersistedRequests();
const numberOfPersistedRequests = currentPersistedRequests.length;
const persistedCommands = getCommands(currentPersistedRequests);
Log.info('[SequentialQueue] Unpausing the queue', false, {
numberOfPersistedRequests,
persistedCommands,
});
isQueuePaused = false;
// If there are no persisted requests, we need to flush the Onyx updates queue
if (numberOfPersistedRequests === 0) {
Log.info('[SequentialQueue] No persisted requests, flushing Onyx updates queue');
flushOnyxUpdatesQueue();
}
// When the queue is paused and then unpaused, we call flush which by defaults recreates the isReadyPromise.
// After all the WRITE requests are done, the isReadyPromise is resolved, but since it's a new instance of promise,
// the pending READ request never received the resolved callback. That's why we don't want to recreate
// the promise when unpausing the queue.
Log.info('[SequentialQueue] Calling flush(false) to start processing', false, {
numberOfPersistedRequests,
});
flush(false);
}
/**
 * Whether the sequential queue is currently draining persisted requests.
 */
function isRunning(): boolean {
    return isSequentialQueueRunning;
}
/**
 * Whether the queue is paused (no requests will be processed until unpause()).
 */
function isPaused(): boolean {
    return isQueuePaused;
}
/**
 * Current value of the NETWORK.shouldFailAllRequests flag mirrored from Onyx.
 */
function getShouldFailAllRequests(): boolean {
    return shouldFailAllRequests;
}
// Flush the queue when the connection resumes after being offline
onReconnection(flush);
// Flush the queue once PersistedRequests has finished loading from storage
onPersistedRequestsInitialization(flush);
/**
 * Applies a conflict-resolution action (produced by a request's checkAndFixConflictingRequest)
 * against the persisted request queue. 'delete' actions may chain into a follow-up action,
 * which is handled recursively.
 */
function handleConflictActions<TKey extends OnyxKey>(conflictAction: ConflictData, newRequest: OnyxRequest<TKey>) {
    Log.info('[SequentialQueue] handleConflictActions', false, {
        conflictType: conflictAction.type,
        newCommand: newRequest.command,
        currentQueueLength: getAllPersistedRequests().length,
    });
    switch (conflictAction.type) {
        // Append the new request to the end of the queue.
        case 'push':
            Log.info('[SequentialQueue] Conflict resolution: PUSH', false, {
                command: newRequest.command,
            });
            savePersistedRequest(newRequest);
            break;
        // Overwrite an existing queued request in place.
        case 'replace':
            Log.info('[SequentialQueue] Conflict resolution: REPLACE', false, {
                command: newRequest.command,
                replaceIndex: conflictAction.index,
                replacementRequest: conflictAction.request?.command ?? newRequest.command,
            });
            updatePersistedRequest(conflictAction.index, conflictAction.request ?? (newRequest as AnyRequest));
            break;
        // Remove one or more queued requests, optionally push the new one, optionally chain.
        case 'delete':
            Log.info('[SequentialQueue] Conflict resolution: DELETE', false, {
                command: newRequest.command,
                deleteIndices: conflictAction.indices,
                willPushNewRequest: conflictAction.pushNewRequest ?? false,
                hasNextAction: !!conflictAction.nextAction,
            });
            deletePersistedRequestsByIndices(conflictAction.indices);
            if (conflictAction.pushNewRequest) {
                Log.info('[SequentialQueue] Pushing new request after delete', false, {
                    command: newRequest.command,
                });
                savePersistedRequest(newRequest);
            }
            if (conflictAction.nextAction) {
                Log.info('[SequentialQueue] Processing next conflict action', false, {
                    command: newRequest.command,
                    nextActionType: conflictAction.nextAction.type,
                });
                handleConflictActions(conflictAction.nextAction, newRequest);
            }
            break;
        // Any other action type means the incoming request is intentionally dropped.
        default:
            Log.info('[SequentialQueue] No action performed, request ignored', false, {
                command: newRequest.command,
                conflictType: conflictAction.type,
            });
    }
}
/**
 * Adds a request to the persisted queue (running conflict resolution first if the request
 * provides a checker) and kicks off a flush unless we are offline.
 */
function push<TKey extends OnyxKey>(newRequest: OnyxRequest<TKey>) {
    const queuedRequests = getAllPersistedRequests();
    Log.info('[SequentialQueue] push() called', false, {
        command: newRequest.command,
        hasConflictChecker: !!newRequest.checkAndFixConflictingRequest,
        currentQueueLength: queuedRequests.length,
        isOffline: isOffline(),
        isSequentialQueueRunning,
    });
    if (newRequest.checkAndFixConflictingRequest) {
        Log.info('[SequentialQueue] Checking for conflicts', false, {
            command: newRequest.command,
            existingRequestsCount: queuedRequests.length,
        });
        const {conflictAction} = newRequest.checkAndFixConflictingRequest(queuedRequests as Array<OnyxRequest<TKey>>);
        Log.info('[SequentialQueue] Conflict action determined', false, {
            command: newRequest.command,
            conflictType: conflictAction.type,
        });
        // don't try to serialize a function.
        // eslint-disable-next-line no-param-reassign
        delete newRequest.checkAndFixConflictingRequest;
        handleConflictActions(conflictAction, newRequest);
    } else {
        Log.info('[SequentialQueue] No conflict action. Adding request to Persisted Requests', false, {
            command: newRequest.command,
        });
        // Persist unconditionally so the request can be retried if it fails.
        savePersistedRequest(newRequest);
    }
    // Offline: the request stays persisted and the queue flushes automatically on reconnection.
    if (isOffline()) {
        Log.info('[SequentialQueue] Unable to push request due to offline status', false, {
            command: newRequest.command,
            queueLength: getAllPersistedRequests().length,
        });
        return;
    }
    // Queue busy: defer the flush until the current batch signals completion via isReadyPromise.
    if (isSequentialQueueRunning) {
        Log.info('[SequentialQueue] Queue is running. Will flush when the current request is finished.', false, {
            command: newRequest.command,
        });
        isReadyPromise.then(() => {
            Log.info('[SequentialQueue] isReadyPromise resolved, flushing queue', false, {
                command: newRequest.command,
            });
            flush(true);
        });
        return;
    }
    Log.info('[SequentialQueue] Queue is not running. Flushing the queue.', false, {
        command: newRequest.command,
    });
    flush(true);
}
/**
 * Returns the promise of the request currently in flight, or an already-resolved
 * promise when the queue is idle.
 */
function getCurrentRequest(): Promise<void> {
    return currentRequestPromise ?? Promise.resolve();
}
/**
 * Returns a promise that resolves when the sequential queue has finished processing
 * all persisted write requests (READ requests await this before running).
 */
function waitForIdle(): Promise<unknown> {
    return isReadyPromise;
}
/**
 * Clear any pending requests during test runs.
 * Restores all queue state to its initial values and re-arms (then immediately resolves)
 * the readiness promise, mirroring module-load behavior.
 */
function resetQueue(): void {
    isQueuePaused = false;
    currentRequestPromise = null;
    isSequentialQueueRunning = false;
    isReadyPromise = new Promise((resolve) => {
        resolveIsReadyPromise = resolve;
    });
    resolveIsReadyPromise?.();
}
// Public API of the sequential queue, exported alphabetically.
export {
    clearQueueFlushedData,
    flush,
    getCurrentRequest,
    getQueueFlushedData,
    getShouldFailAllRequests,
    isPaused,
    isRunning,
    pause,
    process,
    push,
    resetQueue,
    saveQueueFlushedData,
    sequentialQueueRequestThrottle,
    unpause,
    waitForIdle,
};
export type {RequestError};