diff --git a/backend/tests/Taskdeck.Api.Tests/Resilience/QueueAccumulationResilienceTests.cs b/backend/tests/Taskdeck.Api.Tests/Resilience/QueueAccumulationResilienceTests.cs
new file mode 100644
index 000000000..0cb6cc4d2
--- /dev/null
+++ b/backend/tests/Taskdeck.Api.Tests/Resilience/QueueAccumulationResilienceTests.cs
@@ -0,0 +1,164 @@
+using System.Net;
+using System.Net.Http.Json;
+using FluentAssertions;
+using Microsoft.Extensions.DependencyInjection;
+using Taskdeck.Api.Tests.Support;
+using Taskdeck.Application.DTOs;
+using Taskdeck.Application.Interfaces;
+using Taskdeck.Domain.Entities;
+using Taskdeck.Domain.Enums;
+using Taskdeck.Infrastructure.Persistence;
+using Xunit;
+
+namespace Taskdeck.Api.Tests.Resilience;
+
+/// <summary>
+/// Tests that queue items accumulate correctly when workers are not processing
+/// (simulated by having processing disabled or worker stopped), and that items
+/// remain consistent (no corruption) and are processable on restart.
+/// Covers issue #720 (TST-67): "All workers stopped → queue items accumulate
+/// but don't corrupt; restart processes them."
+/// </summary>
+public class QueueAccumulationResilienceTests : IClassFixture<TestWebApplicationFactory>
+{
+ private readonly TestWebApplicationFactory _factory;
+
+ public QueueAccumulationResilienceTests(TestWebApplicationFactory factory)
+ {
+ _factory = factory;
+ }
+
+ // ── Queue Items Accumulate Without Corruption ─────────────────────
+
+ [Fact]
+ public async Task QueueItems_AccumulateWithoutCorruption_WhenWorkersNotProcessing()
+ {
+ // Arrange: create a user, then enqueue multiple capture items.
+ // The background worker may process them, but the important assertion is
+ // that items are created with correct status and no data corruption occurs
+ // regardless of worker state.
+ using var client = _factory.CreateClient();
+ await ApiTestHarness.AuthenticateAsync(client, "queue-accum-resilience");
+
+ // Create multiple capture items in quick succession.
+        var itemIds = new List<Guid>();
+ for (var i = 0; i < 5; i++)
+ {
+ var response = await client.PostAsJsonAsync(
+ "/api/capture/items",
+ new CreateCaptureItemDto(null, $"Queue accumulation test item {i}"));
+ response.StatusCode.Should().Be(HttpStatusCode.Created,
+ $"capture item {i} should be accepted");
+
+            var item = await response.Content.ReadFromJsonAsync<CaptureItemDto>();
+ item.Should().NotBeNull();
+ itemIds.Add(item!.Id);
+ }
+
+ // Assert: all items should exist and have consistent state.
+ var listResponse = await client.GetAsync("/api/capture/items?limit=100");
+ listResponse.StatusCode.Should().Be(HttpStatusCode.OK);
+
+        var listPayload = await listResponse.Content.ReadFromJsonAsync<List<CaptureItemDto>>();
+ listPayload.Should().NotBeNull();
+
+ // All 5 items should be present (they may have been processed already by the worker,
+ // but none should be missing or corrupted).
+ foreach (var id in itemIds)
+ {
+ listPayload!.Should().Contain(
+ i => i.Id == id,
+ $"item {id} should exist in the queue regardless of worker state");
+ }
+
+ // No item should be in an invalid/corrupted status.
+ foreach (var item in listPayload!.Where(i => itemIds.Contains(i.Id)))
+ {
+ item.Status.Should().BeDefined(
+ "item status should always be set to a valid enum value");
+ }
+ }
+
+ // ── Queue Items Are Processable After Accumulation ───────────────
+
+ [Fact]
+ public async Task QueuedItems_RemainProcessable_AfterAccumulation()
+ {
+ // Verify that items created during worker downtime have valid status
+ // and are in a state that allows future processing.
+ using var scope = _factory.Services.CreateScope();
+        var dbContext = scope.ServiceProvider.GetRequiredService<TaskdeckDbContext>();
+
+ var user = new User("queue-processable-user", "queue-processable@example.com", "hash");
+ dbContext.Users.Add(user);
+ await dbContext.SaveChangesAsync();
+
+ // Create LLM queue items directly (bypassing API to simulate accumulated items).
+        var items = new List<LlmRequest>();
+ for (var i = 0; i < 3; i++)
+ {
+ var item = new LlmRequest(user.Id, "instruction", $"Create card {i}", null);
+ items.Add(item);
+ dbContext.Add(item);
+ }
+ await dbContext.SaveChangesAsync();
+
+ // Assert: all items should start as Pending and be individually processable.
+ foreach (var item in items)
+ {
+ await dbContext.Entry(item).ReloadAsync();
+ item.Status.Should().Be(RequestStatus.Pending,
+ "accumulated items should be in Pending status, ready for processing");
+ item.RetryCount.Should().Be(0,
+ "fresh items should have zero retry count");
+ }
+
+ // Simulate a worker picking up the first item (MarkAsProcessing).
+ items[0].MarkAsProcessing();
+ await dbContext.SaveChangesAsync();
+ await dbContext.Entry(items[0]).ReloadAsync();
+
+ items[0].Status.Should().Be(RequestStatus.Processing,
+ "first item should transition to Processing when claimed by a worker");
+
+ // Other items should remain Pending (not affected by the first item's transition).
+ await dbContext.Entry(items[1]).ReloadAsync();
+ await dbContext.Entry(items[2]).ReloadAsync();
+ items[1].Status.Should().Be(RequestStatus.Pending,
+ "other items should remain Pending when one is claimed");
+ items[2].Status.Should().Be(RequestStatus.Pending);
+ }
+
+ // ── Capture Items Do Not Corrupt Under Rapid Submission ──────────
+
+ [Fact]
+ public async Task RapidCaptureSubmission_DoesNotCorruptQueue()
+ {
+ using var client = _factory.CreateClient();
+ await ApiTestHarness.AuthenticateAsync(client, "queue-rapid-submit");
+
+ // Submit captures as fast as possible (no await between sends).
+ var tasks = Enumerable.Range(0, 10).Select(i =>
+ client.PostAsJsonAsync(
+ "/api/capture/items",
+ new CreateCaptureItemDto(null, $"Rapid item {i}")));
+
+ var responses = await Task.WhenAll(tasks);
+
+ // All submissions should succeed (201 Created).
+ foreach (var response in responses)
+ {
+ response.StatusCode.Should().Be(HttpStatusCode.Created,
+ "every rapid submission should succeed without corruption");
+ }
+
+ // Verify items are retrievable.
+ var listResponse = await client.GetAsync("/api/capture/items?limit=100");
+ listResponse.StatusCode.Should().Be(HttpStatusCode.OK);
+
+        var payload = await listResponse.Content.ReadFromJsonAsync<List<CaptureItemDto>>();
+ payload.Should().NotBeNull();
+ payload!.Should().HaveCountGreaterThanOrEqualTo(10,
+ "all rapidly submitted items should be present");
+ }
+}
diff --git a/backend/tests/Taskdeck.Application.Tests/Services/LlmProviderResilienceTests.cs b/backend/tests/Taskdeck.Application.Tests/Services/LlmProviderResilienceTests.cs
new file mode 100644
index 000000000..15f09c5b9
--- /dev/null
+++ b/backend/tests/Taskdeck.Application.Tests/Services/LlmProviderResilienceTests.cs
@@ -0,0 +1,355 @@
+using System.Net;
+using System.Text;
+using FluentAssertions;
+using Microsoft.Extensions.Logging.Abstractions;
+using Taskdeck.Application.Services;
+using Taskdeck.Application.Tests.TestUtilities;
+using Taskdeck.Tests.Support;
+using Xunit;
+
+namespace Taskdeck.Application.Tests.Services;
+
+/// <summary>
+/// Resilience tests for LLM providers: garbage responses, rate limiting (429),
+/// network timeouts, and empty/null responses. Validates that every failure mode
+/// produces a degraded response rather than an unhandled exception.
+/// Covers issue #720 (TST-67).
+/// </summary>
+public class LlmProviderResilienceTests
+{
+ // ── OpenAI: Garbage Response (Invalid JSON Body) ─────────────────
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_GarbageResponseBody_ReturnsDegradedResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent(
+ "this is not json at all 502 Bad Gateway",
+ Encoding.UTF8,
+ "text/html")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "create a card")]));
+
+ result.Should().NotBeNull("provider must never return null");
+ result.IsDegraded.Should().BeTrue("garbage response should be flagged as degraded");
+ result.DegradedReason.Should().NotBeNullOrWhiteSpace(
+ "degraded reason should explain why the response is degraded");
+ result.Provider.Should().Be("OpenAI");
+ }
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_EmptyResponseBody_ReturnsDegradedResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent("", Encoding.UTF8, "application/json")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "list my tasks")]));
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue("empty body should produce degraded response");
+ }
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_ValidJsonButMissingChoices_ReturnsDegradedResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent(
+ """{"error": "unexpected format", "usage": {"total_tokens": 0}}""",
+ Encoding.UTF8,
+ "application/json")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "hello")]));
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue("response with no choices array should be degraded");
+ }
+
+ // ── OpenAI: Rate Limiting (429) ─────────────────────────────────
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_Returns429RateLimited_ReturnsDegradedResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ {
+ var response = new HttpResponseMessage((HttpStatusCode)429)
+ {
+ Content = new StringContent(
+ """{"error": {"message": "Rate limit exceeded", "type": "tokens", "code": "rate_limit_exceeded"}}""",
+ Encoding.UTF8,
+ "application/json")
+ };
+ response.Headers.Add("Retry-After", "30");
+ return response;
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "create a card")]));
+
+ result.Should().NotBeNull("429 should produce a degraded result, not throw");
+ result.IsDegraded.Should().BeTrue("rate-limited response should be flagged as degraded");
+ result.DegradedReason.Should().Contain("failed",
+ "degraded reason should indicate the request failed");
+ result.Provider.Should().Be("OpenAI");
+ }
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_Returns429_DoesNotThrowException()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage((HttpStatusCode)429)
+ {
+ Content = new StringContent(
+ """{"error": {"message": "Too many requests"}}""",
+ Encoding.UTF8,
+ "application/json")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var act = async () => await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "test")]));
+
+ await act.Should().NotThrowAsync(
+ "rate limiting must never cause an unhandled exception");
+ }
+
+ // ── OpenAI: Network Timeout ─────────────────────────────────────
+
+ [Fact]
+ public async Task OpenAi_CompleteAsync_HttpClientThrowsTimeout_PropagatesException()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler((_, _) =>
+ throw new TaskCanceledException("The request was canceled due to the configured HttpClient.Timeout"));
+ var logger = new InMemoryLogger();
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, logger);
+
+ // TaskCanceledException from HttpClient timeout is an OperationCanceledException.
+ // The provider intentionally re-throws this exception so that the caller (e.g., the
+ // controller) can handle the timeout appropriately (e.g., by returning 504 Gateway Timeout).
+ var act = async () => await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "create a card")]));
+
+        await act.Should().ThrowAsync<TaskCanceledException>(
+ "timeout exceptions should propagate so the controller layer can handle them");
+ }
+
+ // ── OpenAI: Tool Calling with Garbage Response ──────────────────
+
+ [Fact]
+ public async Task OpenAi_CompleteWithToolsAsync_GarbageResponse_ReturnsDegradedToolResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent(
+ "NOT JSON AT ALL",
+ Encoding.UTF8,
+ "text/plain")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+        var tools = Array.Empty<ToolDefinition>();
+ var result = await provider.CompleteWithToolsAsync(
+ new ChatCompletionRequest([new ChatCompletionMessage("User", "list cards")]),
+ tools);
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue("garbage tool-calling response should be degraded");
+ result.IsComplete.Should().BeTrue("degraded tool result should signal completion");
+ }
+
+ [Fact]
+ public async Task OpenAi_CompleteWithToolsAsync_Returns500_ReturnsDegradedToolResult()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.InternalServerError)
+ {
+ Content = new StringContent(
+ """{"error": {"message": "Internal server error"}}""",
+ Encoding.UTF8,
+ "application/json")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+        var tools = Array.Empty<ToolDefinition>();
+ var result = await provider.CompleteWithToolsAsync(
+ new ChatCompletionRequest([new ChatCompletionMessage("User", "list cards")]),
+ tools);
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue("500 response should produce degraded tool result");
+ result.IsComplete.Should().BeTrue();
+ }
+
+ // ── Gemini: Garbage Response ────────────────────────────────────
+
+ [Fact]
+ public async Task Gemini_CompleteAsync_GarbageResponseBody_ReturnsDegradedResult()
+ {
+ var settings = BuildGeminiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent(
+ "502 proxy error",
+ Encoding.UTF8,
+ "text/xml")
+ });
+ var provider = new GeminiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "create a task")]));
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue("garbage Gemini response should be degraded");
+ result.Provider.Should().Be("Gemini");
+ }
+
+ [Fact]
+ public async Task Gemini_CompleteAsync_Returns429RateLimited_ReturnsDegradedResult()
+ {
+ var settings = BuildGeminiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage((HttpStatusCode)429)
+ {
+ Content = new StringContent(
+ """{"error": {"code": 429, "message": "Resource exhausted"}}""",
+ Encoding.UTF8,
+ "application/json")
+ });
+ var provider = new GeminiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "create a card")]));
+
+ result.Should().NotBeNull("429 should not crash");
+ result.IsDegraded.Should().BeTrue("rate-limited Gemini response should be degraded");
+ result.Provider.Should().Be("Gemini");
+ }
+
+ [Fact]
+ public async Task Gemini_CompleteAsync_EmptyResponseBody_ReturnsDegradedResult()
+ {
+ var settings = BuildGeminiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent("", Encoding.UTF8, "application/json")
+ });
+ var provider = new GeminiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var result = await provider.CompleteAsync(new ChatCompletionRequest(
+ [new ChatCompletionMessage("User", "hello")]));
+
+ result.Should().NotBeNull();
+ result.IsDegraded.Should().BeTrue();
+ }
+
+ // ── OpenAI: Health / Probe with degraded provider ───────────────
+
+ [Fact]
+ public async Task OpenAi_ProbeAsync_WhenProviderReturnsGarbage_ReportsUnhealthy()
+ {
+ var settings = BuildOpenAiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent("not json", Encoding.UTF8, "text/plain")
+ });
+ var provider = new OpenAiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var health = await provider.ProbeAsync();
+
+ health.IsAvailable.Should().BeFalse("probe should detect degraded responses as unhealthy");
+ health.IsProbed.Should().BeTrue();
+ }
+
+ // ── Gemini: Health / Probe with degraded provider ───────────────
+
+ [Fact]
+ public async Task Gemini_ProbeAsync_WhenProviderReturnsGarbage_ReportsUnhealthy()
+ {
+ var settings = BuildGeminiSettings();
+ var handler = new StubHttpMessageHandler(_ =>
+ new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent("not json", Encoding.UTF8, "text/plain")
+ });
+ var provider = new GeminiLlmProvider(
+ new HttpClient(handler), settings, NullLogger.Instance);
+
+ var health = await provider.ProbeAsync();
+
+ health.IsAvailable.Should().BeFalse("probe should detect garbage as unhealthy");
+ health.IsProbed.Should().BeTrue();
+ }
+
+ // ── Helpers ──────────────────────────────────────────────────────
+
+ private static LlmProviderSettings BuildOpenAiSettings()
+ {
+ return new LlmProviderSettings
+ {
+ EnableLiveProviders = true,
+ Provider = "OpenAI",
+ OpenAi = new OpenAiProviderSettings
+ {
+ ApiKey = "test-key",
+ BaseUrl = "https://api.openai.com/v1",
+ Model = "gpt-4o-mini",
+ TimeoutSeconds = 30
+ }
+ };
+ }
+
+ private static LlmProviderSettings BuildGeminiSettings()
+ {
+ return new LlmProviderSettings
+ {
+ EnableLiveProviders = true,
+ Provider = "Gemini",
+ Gemini = new GeminiProviderSettings
+ {
+ ApiKey = "test-gemini-key",
+ BaseUrl = "https://generativelanguage.googleapis.com/v1beta",
+ Model = "gemini-2.5-flash",
+ TimeoutSeconds = 30
+ }
+ };
+ }
+}
diff --git a/frontend/taskdeck-web/src/tests/resilience/slowApiAndStorage.spec.ts b/frontend/taskdeck-web/src/tests/resilience/slowApiAndStorage.spec.ts
new file mode 100644
index 000000000..1d9e4d5a8
--- /dev/null
+++ b/frontend/taskdeck-web/src/tests/resilience/slowApiAndStorage.spec.ts
@@ -0,0 +1,382 @@
+/**
+ * Additional resilience tests for frontend: slow API responses, duplicate request
+ * prevention, and localStorage corruption/clearing mid-session.
+ * Issue #720 (TST-67): Covers slow API (5+ seconds) → loading indicators, no
+ * duplicate requests; and localStorage corrupted/cleared → graceful handling.
+ */
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
+import { createPinia, setActivePinia } from 'pinia'
+import { useBoardStore } from '../../store/boardStore'
+import { useCaptureStore } from '../../store/captureStore'
+import { useSessionStore } from '../../store/sessionStore'
+import { boardsApi } from '../../api/boardsApi'
+import { captureApi } from '../../api/captureApi'
+import * as tokenStorage from '../../utils/tokenStorage'
+
+// ─── global mocks ────────────────────────────────────────────────────────────
+
+const toastMocks = vi.hoisted(() => ({ success: vi.fn(), error: vi.fn(), info: vi.fn() }))
+
+vi.mock('../../store/toastStore', () => ({
+ useToastStore: () => toastMocks,
+}))
+
+vi.mock('../../composables/useErrorMapper', () => ({
+ getErrorDisplay: (_err: unknown, fallback: string) => ({ message: fallback, code: null }),
+}))
+
+vi.mock('../../api/boardsApi', () => ({
+ boardsApi: {
+ getBoards: vi.fn(),
+ getBoard: vi.fn(),
+ createBoard: vi.fn(),
+ updateBoard: vi.fn(),
+ deleteBoard: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/cardsApi', () => ({
+ cardsApi: {
+ getCards: vi.fn(),
+ createCard: vi.fn(),
+ updateCard: vi.fn(),
+ moveCard: vi.fn(),
+ deleteCard: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/cardCommentsApi', () => ({
+ cardCommentsApi: {
+ getComments: vi.fn(),
+ createComment: vi.fn(),
+ updateComment: vi.fn(),
+ deleteComment: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/columnsApi', () => ({
+ columnsApi: {
+ createColumn: vi.fn(),
+ updateColumn: vi.fn(),
+ deleteColumn: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/labelsApi', () => ({
+ labelsApi: {
+ getLabels: vi.fn(),
+ createLabel: vi.fn(),
+ updateLabel: vi.fn(),
+ deleteLabel: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/captureApi', () => ({
+ captureApi: {
+ createItem: vi.fn(),
+ listItems: vi.fn(),
+ getItem: vi.fn(),
+ ignoreItem: vi.fn(),
+ cancelItem: vi.fn(),
+ enqueueTriage: vi.fn(),
+ batchTriage: vi.fn(),
+ updateSuggestion: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/authApi', () => ({
+ authApi: {
+ login: vi.fn(),
+ register: vi.fn(),
+ changePassword: vi.fn(),
+ getProviders: vi.fn(),
+ exchangeOAuthCode: vi.fn(),
+ },
+}))
+
+vi.mock('../../api/usersApi', () => ({
+ usersApi: {
+ getUser: vi.fn(),
+ },
+}))
+
+// ─── helpers ─────────────────────────────────────────────────────────────────
+
+function makeNetworkError(message = 'Network Error'): Error {
+ return Object.assign(new Error(message), { code: 'ERR_NETWORK' })
+}
+
+/**
+ * Creates a promise that resolves after the specified delay, simulating a slow API.
+ * Uses vi.advanceTimersByTimeAsync for deterministic timer control.
+ */
+function makeSlowResponse<T>(value: T, delayMs: number): Promise<T> {
+ return new Promise((resolve) => {
+ setTimeout(() => resolve(value), delayMs)
+ })
+}
+
+// ─── boardStore — slow API resilience ────────────────────────────────────────
+
+describe('boardStore — slow API response handling', () => {
+  let store: ReturnType<typeof useBoardStore>
+
+ beforeEach(() => {
+ setActivePinia(createPinia())
+ store = useBoardStore()
+ vi.clearAllMocks()
+ vi.useFakeTimers()
+ })
+
+ afterEach(() => {
+ vi.useRealTimers()
+ })
+
+ it('sets loading=true during a slow API call and clears it on completion', async () => {
+ const boards = [
+ {
+ id: 'b1',
+ name: 'Board',
+ description: '',
+ isArchived: false,
+ createdAt: new Date().toISOString(),
+ updatedAt: new Date().toISOString(),
+ columns: [],
+ },
+ ]
+ // Simulate a 5-second API response.
+ vi.mocked(boardsApi.getBoards).mockReturnValue(makeSlowResponse(boards, 5000))
+
+ const fetchPromise = store.fetchBoards()
+
+ // Immediately after starting, loading should be true.
+ expect(store.loading).toBe(true)
+
+ // Advance past the 5-second delay.
+ await vi.advanceTimersByTimeAsync(5000)
+ await fetchPromise
+
+ expect(store.loading).toBe(false)
+ expect(store.boards).toHaveLength(1)
+ })
+
+ it('does not fire duplicate requests during throttle window', async () => {
+ const boards = [
+ {
+ id: 'b1',
+ name: 'Board',
+ description: '',
+ isArchived: false,
+ createdAt: new Date().toISOString(),
+ updatedAt: new Date().toISOString(),
+ columns: [],
+ },
+ ]
+ vi.mocked(boardsApi.getBoards).mockResolvedValue(boards)
+
+ // First fetch — should hit API.
+ await store.fetchBoards()
+ expect(boardsApi.getBoards).toHaveBeenCalledTimes(1)
+
+ // Second fetch within throttle window — should be skipped.
+ await store.fetchBoards()
+ expect(boardsApi.getBoards).toHaveBeenCalledTimes(1)
+
+ // Advance past throttle window (5 seconds).
+ await vi.advanceTimersByTimeAsync(5001)
+
+ // Third fetch after throttle expires — should hit API again.
+ await store.fetchBoards()
+ expect(boardsApi.getBoards).toHaveBeenCalledTimes(2)
+ })
+
+ it('error state is set when slow API eventually fails', async () => {
+ vi.mocked(boardsApi.getBoards).mockReturnValue(
+ makeSlowResponse(null, 5000).then(() => {
+ throw makeNetworkError('Timeout after 5s')
+ }),
+ )
+
+ const fetchPromise = store.fetchBoards()
+ expect(store.loading).toBe(true)
+
+ // Attach rejection handler BEFORE advancing timers to avoid
+ // unhandled promise rejection when the timer fires the throw.
+ const rejectExpectation = expect(fetchPromise).rejects.toThrow()
+
+ await vi.advanceTimersByTimeAsync(5000)
+ await rejectExpectation
+
+ expect(store.loading).toBe(false)
+ expect(store.error).toBeTruthy()
+ })
+})
+
+// ─── captureStore — slow API resilience ──────────────────────────────────────
+
+describe('captureStore — slow API response handling', () => {
+ beforeEach(() => {
+ setActivePinia(createPinia())
+ vi.clearAllMocks()
+ vi.useFakeTimers()
+ })
+
+ afterEach(() => {
+ vi.useRealTimers()
+ })
+
+ it('handles slow createItem without crashing; shows toast on error after delay', async () => {
+ vi.mocked(captureApi.createItem).mockReturnValue(
+ makeSlowResponse(null, 6000).then(() => {
+ throw makeNetworkError('Slow timeout')
+ }),
+ )
+
+ const store = useCaptureStore()
+ const createPromise = store.createItem({ text: 'Slow capture', boardId: null })
+
+ // Attach rejection handler BEFORE advancing timers to avoid
+ // unhandled promise rejection when the timer fires the throw.
+ const settled = createPromise.catch(() => {
+ // Expected failure -- handled here to prevent unhandled rejection
+ })
+
+ await vi.advanceTimersByTimeAsync(6000)
+ await settled
+
+ expect(toastMocks.error).toHaveBeenCalled()
+ })
+})
+
+// ─── sessionStore — localStorage corruption and clearing ─────────────────────
+
+describe('sessionStore — localStorage corruption mid-session', () => {
+ beforeEach(() => {
+ setActivePinia(createPinia())
+ vi.clearAllMocks()
+ localStorage.clear()
+ })
+
+ it('gracefully handles localStorage cleared mid-session via restoreSession', () => {
+ const store = useSessionStore()
+
+ // Simulate a valid session being set (mock the login flow).
+ // We can't actually call login since the API is mocked, so we
+ // test restoreSession which is the path that runs on app init.
+
+ // First, seed localStorage with a previously valid session.
+ // Use a structurally INVALID token (not 3 base64url segments) to test cleanup.
+ localStorage.setItem('taskdeck_token', 'not-a-valid-jwt')
+ localStorage.setItem('taskdeck_session', JSON.stringify({
+ userId: 'u1',
+ username: 'testuser',
+ email: 'test@example.com',
+ }))
+
+ // restoreSession should detect the invalid token and clean up.
+ store.restoreSession()
+
+ expect(store.isAuthenticated).toBe(false)
+ expect(store.token).toBeFalsy()
+ expect(localStorage.getItem('taskdeck_token')).toBeNull()
+ })
+
+ it('handles corrupted JSON in localStorage session without throwing', () => {
+ localStorage.setItem('taskdeck_session', '{corrupted json!!!')
+
+ const act = () => {
+ setActivePinia(createPinia())
+ const store = useSessionStore()
+ store.restoreSession()
+ }
+
+ expect(act).not.toThrow()
+ // The session module should have cleaned up the corrupted data.
+ expect(localStorage.getItem('taskdeck_session')).toBeNull()
+ })
+
+ it('handles localStorage suddenly cleared after session was established', () => {
+ const store = useSessionStore()
+
+ // Call restoreSession on empty localStorage — no crash.
+ store.restoreSession()
+
+ expect(store.isAuthenticated).toBe(false)
+ expect(store.userId).toBeNull()
+ })
+
+ it('tokenStorage.getToken returns null and cleans up for corrupted token', () => {
+ localStorage.setItem('taskdeck_token', 'definitely-not-jwt-format')
+
+ const result = tokenStorage.getToken()
+
+ expect(result).toBeNull()
+ expect(localStorage.getItem('taskdeck_token')).toBeNull()
+ })
+
+ it('tokenStorage.getSession returns null and cleans up for corrupted session', () => {
+ localStorage.setItem('taskdeck_session', 'not-even-json')
+
+ const result = tokenStorage.getSession()
+
+ expect(result).toBeNull()
+ expect(localStorage.getItem('taskdeck_session')).toBeNull()
+ })
+
+ it('tokenStorage.getSession returns null for session with missing required fields', () => {
+ localStorage.setItem('taskdeck_session', JSON.stringify({ userId: 'u1' }))
+
+ const result = tokenStorage.getSession()
+
+ expect(result).toBeNull()
+ })
+
+ it('tokenStorage.setToken rejects and returns false for non-JWT strings', () => {
+ const result = tokenStorage.setToken('bad-token')
+
+ expect(result).toBe(false)
+ expect(localStorage.getItem('taskdeck_token')).toBeNull()
+ })
+})
+
+// ─── boardStore — loading state consistency under concurrent operations ──────
+
+describe('boardStore — loading state consistency', () => {
+ beforeEach(() => {
+ setActivePinia(createPinia())
+ vi.clearAllMocks()
+ })
+
+ it('loading is false initially and after a completed fetch', async () => {
+ const store = useBoardStore()
+ expect(store.loading).toBe(false)
+
+ vi.mocked(boardsApi.getBoards).mockResolvedValue([])
+ await store.fetchBoards()
+
+ expect(store.loading).toBe(false)
+ })
+
+ it('loading returns to false after a failed fetch', async () => {
+ const store = useBoardStore()
+ vi.mocked(boardsApi.getBoards).mockRejectedValue(makeNetworkError())
+
+ await expect(store.fetchBoards()).rejects.toThrow()
+
+ expect(store.loading).toBe(false)
+ })
+
+ it('error is cleared on a subsequent successful fetch', async () => {
+ const store = useBoardStore()
+
+ // First: fail
+ vi.mocked(boardsApi.getBoards).mockRejectedValueOnce(makeNetworkError())
+ await expect(store.fetchBoards()).rejects.toThrow()
+ expect(store.error).toBeTruthy()
+
+ // Second: succeed (use filter to bypass throttle)
+ vi.mocked(boardsApi.getBoards).mockResolvedValueOnce([])
+ await store.fetchBoards('bypass-throttle')
+ expect(store.error).toBeNull()
+ })
+})