package server

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/docker/docker-agent/pkg/api"
	"github.com/docker/docker-agent/pkg/concurrent"
	"github.com/docker/docker-agent/pkg/config"
	"github.com/docker/docker-agent/pkg/runtime"
	"github.com/docker/docker-agent/pkg/session"
	"github.com/docker/docker-agent/pkg/sessiontitle"
	"github.com/docker/docker-agent/pkg/tools"
)

// fakeRuntime is a minimal runtime.Runtime that tracks how many RunStream
// calls are in flight at once.
type fakeRuntime struct {
	runtime.Runtime

	concurrentStreams atomic.Int32
	maxConcurrent     atomic.Int32
	streamDelay       time.Duration
}

func (f *fakeRuntime) RunStream(_ context.Context, _ *session.Session) <-chan runtime.Event {
	cur := f.concurrentStreams.Add(1)

	// Record the high-water mark of concurrent streams with a
	// compare-and-swap loop (a lock-free "atomic max").
	for {
		old := f.maxConcurrent.Load()
		if cur <= old || f.maxConcurrent.CompareAndSwap(old, cur) {
			break
		}
	}

	// Simulate a stream that stays open for streamDelay, then finishes.
	ch := make(chan runtime.Event)
	go func() {
		time.Sleep(f.streamDelay)
		f.concurrentStreams.Add(-1)
		close(ch)
	}()
	return ch
}

func (f *fakeRuntime) Resume(_ context.Context, _ runtime.ResumeRequest) {}

func (f *fakeRuntime) ResumeElicitation(_ context.Context, _ tools.ElicitationAction, _ map[string]any) error {
	return nil
}

func newTestSessionManager(t *testing.T, sess *session.Session, fake *fakeRuntime) *SessionManager {
	t.Helper()

	ctx := t.Context()
	store := session.NewInMemorySessionStore()
	require.NoError(t, store.AddSession(ctx, sess))

	sm := &SessionManager{
		runtimeSessions: concurrent.NewMap[string, *activeRuntimes](),
		sessionStore:    store,
		Sources:         config.Sources{},
		runConfig:       &config.RuntimeConfig{},
	}

	// Pre-register a runtime for this session so RunSession skips agent loading.
	sm.runtimeSessions.Store(sess.ID, &activeRuntimes{
		runtime:  fake,
		session:  sess,
		titleGen: (*sessiontitle.Generator)(nil),
	})

	return sm
}
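
// busyGuardSketch is a hypothetical illustration (not part of the
// SessionManager API) of the non-blocking per-session guard the tests below
// assume RunSession uses: a caller that cannot take the session's lock fails
// fast with ErrSessionBusy instead of queueing behind the in-flight stream.
// The real implementation may differ; this sketch only documents that
// assumption.
func busyGuardSketch(mu *sync.Mutex, run func() error) error {
	if !mu.TryLock() {
		return ErrSessionBusy
	}
	defer mu.Unlock()
	return run()
}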

// TestRunSession_ConcurrentRequestReturnsErrSessionBusy verifies that a
// second RunSession call on a session that is already streaming returns
// ErrSessionBusy instead of silently interleaving messages.
func TestRunSession_ConcurrentRequestReturnsErrSessionBusy(t *testing.T) {
	t.Parallel()

	ctx := t.Context()
	sess := session.New()
	fake := &fakeRuntime{streamDelay: 500 * time.Millisecond}
	sm := newTestSessionManager(t, sess, fake)

	// Start the first stream.
	ch1, err := sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
		{Content: "first"},
	})
	require.NoError(t, err)

	// Give the goroutine a moment to acquire the streaming lock.
	time.Sleep(50 * time.Millisecond)

	// The second request should fail immediately with ErrSessionBusy.
	_, err = sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
		{Content: "second"},
	})
	require.ErrorIs(t, err, ErrSessionBusy)

	// Drain the first stream to let it complete.
	for range ch1 {
	}

	// After the first stream finishes, a new request should succeed.
	ch3, err := sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
		{Content: "third"},
	})
	require.NoError(t, err)
	for range ch3 {
	}
}

// TestRunSession_MessagesNotAddedWhenBusy verifies that when a session
// is busy, the rejected request does not mutate the session's messages.
func TestRunSession_MessagesNotAddedWhenBusy(t *testing.T) {
	t.Parallel()

	ctx := t.Context()
	sess := session.New()
	fake := &fakeRuntime{streamDelay: 500 * time.Millisecond}
	sm := newTestSessionManager(t, sess, fake)

	ch1, err := sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
		{Content: "first"},
	})
	require.NoError(t, err)

	time.Sleep(50 * time.Millisecond)

	msgCountBefore := len(sess.GetAllMessages())

	_, err = sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
		{Content: "should not be added"},
	})
	require.ErrorIs(t, err, ErrSessionBusy)

	// Messages should not have been added.
	assert.Len(t, sess.GetAllMessages(), msgCountBefore)

	for range ch1 {
	}
}

// TestRunSession_SequentialRequestsSucceed verifies that sequential
// (non-overlapping) requests on the same session work normally.
func TestRunSession_SequentialRequestsSucceed(t *testing.T) {
	t.Parallel()

	ctx := t.Context()
	sess := session.New()
	fake := &fakeRuntime{streamDelay: 10 * time.Millisecond}
	sm := newTestSessionManager(t, sess, fake)

	for range 3 {
		ch, err := sm.RunSession(ctx, sess.ID, "agent", "root", []api.Message{
			{Content: "hello"},
		})
		require.NoError(t, err)
		for range ch {
		}
	}

	assert.Equal(t, int32(1), fake.maxConcurrent.Load())
}

// TestRunSession_DifferentSessionsConcurrently verifies that concurrent
// requests on *different* sessions are not blocked by each other.
func TestRunSession_DifferentSessionsConcurrently(t *testing.T) {
	t.Parallel()

	ctx := t.Context()
	store := session.NewInMemorySessionStore()
	fake1 := &fakeRuntime{streamDelay: 200 * time.Millisecond}
	fake2 := &fakeRuntime{streamDelay: 200 * time.Millisecond}

	sess1 := session.New()
	sess2 := session.New()
	require.NoError(t, store.AddSession(ctx, sess1))
	require.NoError(t, store.AddSession(ctx, sess2))

	sm := &SessionManager{
		runtimeSessions: concurrent.NewMap[string, *activeRuntimes](),
		sessionStore:    store,
		Sources:         config.Sources{},
		runConfig:       &config.RuntimeConfig{},
	}

	sm.runtimeSessions.Store(sess1.ID, &activeRuntimes{
		runtime: fake1, session: sess1, titleGen: (*sessiontitle.Generator)(nil),
	})
	sm.runtimeSessions.Store(sess2.ID, &activeRuntimes{
		runtime: fake2, session: sess2, titleGen: (*sessiontitle.Generator)(nil),
	})

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		ch, err := sm.RunSession(ctx, sess1.ID, "agent", "root", []api.Message{{Content: "a"}})
		assert.NoError(t, err)
		for range ch {
		}
	}()

	go func() {
		defer wg.Done()
		ch, err := sm.RunSession(ctx, sess2.ID, "agent", "root", []api.Message{{Content: "b"}})
		assert.NoError(t, err)
		for range ch {
		}
	}()

	wg.Wait()

	// Both sessions should have streamed (1 each).
	assert.Equal(t, int32(1), fake1.maxConcurrent.Load())
	assert.Equal(t, int32(1), fake2.maxConcurrent.Load())
}