Skip to content

Commit 416407a

Browse files
authored
#17: Container Pool uses slices instead of linked lists
Refactor ContainerPool to use slices instead of linked lists
2 parents 86009be + d55abe4 commit 416407a

1 file changed

Lines changed: 130 additions & 81 deletions

File tree

internal/node/pool.go

Lines changed: 130 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package node
22

33
import (
4-
"container/list"
54
"errors"
65
"log"
76
"time"
@@ -12,8 +11,9 @@ import (
1211
)
1312

1413
type ContainerPool struct {
15-
busy *list.List
16-
idle *list.List
14+
// for better efficiently we now use slices here instead of linked lists
15+
busy []*container.Container
16+
idle []*container.Container
1717
}
1818

1919
var NoWarmFoundErr = errors.New("no warm container is available")
@@ -30,20 +30,22 @@ func GetContainerPool(f *function.Function) *ContainerPool {
3030
}
3131

3232
func (fp *ContainerPool) popIdleContainer() (*container.Container, bool) {
33-
// TODO: picking most-recent / least-recent container might be better?
34-
elem := fp.idle.Front()
35-
if elem == nil {
33+
n := len(fp.idle)
34+
if n == 0 {
3635
return nil, false
3736
}
37+
// LIFO (maybe better for cache locality)
38+
c := fp.idle[n-1]
3839

39-
c := fp.idle.Remove(elem).(*container.Container)
40+
fp.idle[n-1] = nil // to favor garbage collection
41+
fp.idle = fp.idle[:n-1] // pop the slice
4042

4143
return c, true
4244
}
4345

4446
func (fp *ContainerPool) getReusableContainer(maxConcurrency int16) (*container.Container, bool) {
45-
for elem := fp.busy.Front(); elem != nil; elem = elem.Next() {
46-
c := elem.Value.(*container.Container)
47+
for _, elem := range fp.busy {
48+
c := elem
4749
if c.RequestsCount < maxConcurrency {
4850
return c, true
4951
}
@@ -53,11 +55,10 @@ func (fp *ContainerPool) getReusableContainer(maxConcurrency int16) (*container.
5355
}
5456

5557
func newContainerPool() *ContainerPool {
56-
fp := &ContainerPool{}
57-
fp.busy = list.New()
58-
fp.idle = list.New()
59-
60-
return fp
58+
return &ContainerPool{
59+
busy: make([]*container.Container, 0, 10),
60+
idle: make([]*container.Container, 0, 10),
61+
}
6162
}
6263

6364
func acquireNewMemory(mem int64, forWarmPool bool) bool {
@@ -119,7 +120,7 @@ func acquireWarmContainer(f *function.Function) (*container.Container, error) {
119120

120121
// add container to the busy pool
121122
c.RequestsCount = 1
122-
fp.busy.PushBack(c)
123+
fp.busy = append(fp.busy, c)
123124

124125
log.Printf("Using warm %s for %s. Now: %v", c.ID, f, &LocalResources)
125126
return c, nil
@@ -153,24 +154,30 @@ func HandleCompletion(cont *container.Container, f *function.Function) {
153154
if cont.RequestsCount == 0 {
154155
// the container is now idle and must be moved to the warm pool
155156
fp := GetContainerPool(f)
156-
// we must update the busy list by removing this element
157-
var deleted interface{}
158-
elem := fp.busy.Front()
159-
for ok := elem != nil; ok; ok = elem != nil {
160-
if elem.Value.(*container.Container) == cont {
161-
deleted = fp.busy.Remove(elem) // delete the element from the busy list
157+
// Search the container index in the slice
158+
idx := -1
159+
for i, c := range fp.busy {
160+
if c == cont { // with slices, we can compare pointers
161+
idx = i
162162
break
163163
}
164-
elem = elem.Next()
165164
}
166-
if deleted == nil {
167-
log.Println("Failed to release a container!")
165+
166+
if idx == -1 {
167+
log.Println("Failed to release a container! Not found in busy pool.")
168168
return
169169
}
170170

171+
// swap then pop from the slice. This way we don't have to shift all the subsequent elements.
172+
lastIdx := len(fp.busy) - 1
173+
fp.busy[idx] = fp.busy[lastIdx] // swap between last element and the one we want to delete
174+
fp.busy[lastIdx] = nil // nil to favor garbage collection
175+
fp.busy = fp.busy[:lastIdx] // pop the slice
176+
177+
// finally, we add the container to the idle pool
171178
d := time.Duration(config.GetInt(config.CONTAINER_EXPIRATION_TIME, 600)) * time.Second
172179
cont.ExpirationTime = time.Now().Add(d).UnixNano()
173-
fp.idle.PushBack(cont)
180+
fp.idle = append(fp.idle, cont)
174181

175182
LocalResources.usedCPUs -= f.CPUDemand
176183
LocalResources.busyPoolUsedMem -= f.MemoryMB
@@ -216,10 +223,10 @@ func NewContainerWithAcquiredResources(fun *function.Function, startAsIdle bool,
216223

217224
fp := GetContainerPool(fun)
218225
if startAsIdle {
219-
fp.idle.PushBack(cont)
226+
fp.idle = append(fp.idle, cont)
220227
} else {
221228
cont.RequestsCount = 1
222-
fp.busy.PushBack(cont) // We immediately mark it as busy
229+
fp.busy = append(fp.busy, cont) // We immediately mark it as busy
223230
}
224231

225232
return cont, nil
@@ -231,6 +238,7 @@ func NewContainerWithAcquiredResourcesAsync(fun *function.Function, okCallback f
231238
if err != nil {
232239
log.Printf("Failed container creation: %v\n", err)
233240
errCallback(err)
241+
return
234242
}
235243

236244
LocalResources.Lock()
@@ -243,63 +251,80 @@ func NewContainerWithAcquiredResourcesAsync(fun *function.Function, okCallback f
243251

244252
fp := GetContainerPool(fun)
245253
cont.RequestsCount = 1
246-
fp.busy.PushBack(cont) // We immediately mark it as busy
254+
fp.busy = append(fp.busy, cont) // We immediately mark it as busy
247255
okCallback(cont)
248256
}()
249257
}
250258

251259
type itemToDismiss struct {
252-
contID container.ContainerID
260+
cont *container.Container
253261
pool *ContainerPool
254-
elem *list.Element
255262
memory int64
256263
}
257264

258-
// dismissContainer ... this function is used to get free memory used for a new container
259-
// 2-phases: first, we find idle container and collect them as a slice, second (cleanup phase) we delete the container only and only if
260-
// the sum of their memory is >= requiredMemoryMB is
265+
// removeContainerFromIdle removes a specific container from the idle pool.
266+
func (fp *ContainerPool) removeContainerFromIdle(target *container.Container) bool {
267+
for i, c := range fp.idle {
268+
if c == target { // pointer comparison
269+
// swap with the last element
270+
lastIdx := len(fp.idle) - 1
271+
fp.idle[i] = fp.idle[lastIdx]
272+
273+
fp.idle[lastIdx] = nil // for better garbage collection we delete the last element content before slicing
274+
275+
fp.idle = fp.idle[:lastIdx]
276+
return true
277+
}
278+
}
279+
return false
280+
}
281+
282+
// dismissContainer attempts to free memory by dismissing idle containers.
283+
// It works in 2 phases: research (collect candidates) and cleanup (destroy them).
284+
// Containers are actually cleaned only if they free enough memory for the new function/container
261285
func dismissContainer(requiredMemoryMB int64) (bool, error) {
262286
log.Printf("Trying to dismiss containers to free up at least %d MB", requiredMemoryMB)
263287
var cleanedMB int64 = 0
264288
var containerToDismiss []itemToDismiss
265289

266-
//first phase, research
290+
// Phase 1: Research
291+
// We iterate through all pools to find idle containers to (potentially) remove.
267292
for _, funPool := range LocalResources.containerPools {
268-
if funPool.idle.Len() > 0 {
269-
// every container into the funPool has the same memory (same function)
270-
//so it is not important which one you destroy
271-
elem := funPool.idle.Front()
272-
// container in the same pool need same memory
273-
memory, _ := container.GetMemoryMB(elem.Value.(*container.Container).ID)
274-
275-
for elem != nil {
276-
contID := elem.Value.(*container.Container).ID
277-
containerToDismiss = append(containerToDismiss, itemToDismiss{contID: contID, pool: funPool, elem: elem, memory: memory})
293+
294+
if len(funPool.idle) > 0 {
295+
for _, cont := range funPool.idle {
296+
memory, _ := container.GetMemoryMB(cont.ID)
297+
298+
// We collect the pointer to the container and the pool reference
299+
containerToDismiss = append(containerToDismiss,
300+
itemToDismiss{cont: cont, pool: funPool, memory: memory})
301+
278302
cleanedMB += memory
279303
if cleanedMB >= requiredMemoryMB {
280304
goto cleanup
281305
}
282-
elem = elem.Next()
283306
}
284307
}
285308
}
286309

287-
cleanup: // second phase, cleanup
288-
// memory check
289-
if cleanedMB >= requiredMemoryMB {
310+
cleanup: // Phase 2: Cleanup
311+
if cleanedMB >= requiredMemoryMB { // if we'd actually free enough memory we do it, otherwise there's no point
290312
for _, item := range containerToDismiss {
291-
item.pool.idle.Remove(item.elem) // remove the container from the funPool
292-
err := container.Destroy(item.contID) // destroy the container
293-
if err != nil {
294-
return false, err
313+
if item.pool.removeContainerFromIdle(item.cont) {
314+
// Destroy the actual container resources (Docker/Containerd)
315+
err := container.Destroy(item.cont.ID)
316+
if err != nil {
317+
return false, err
318+
}
319+
LocalResources.warmPoolUsedMem -= item.memory
295320
}
296-
LocalResources.warmPoolUsedMem -= item.memory
297321
}
298-
return true, nil
299322
} else {
300323
log.Printf("Not enough containers to free up at least %d MB (avail to dismiss: %d)", requiredMemoryMB, cleanedMB)
301-
return false, nil
324+
return false, errors.New("not enough containers to free up memory")
325+
302326
}
327+
return true, nil
303328
}
304329

305330
// DeleteExpiredContainer is called by the container cleaner
@@ -311,24 +336,44 @@ func DeleteExpiredContainer() {
311336
defer LocalResources.Unlock()
312337

313338
for _, pool := range LocalResources.containerPools {
314-
elem := pool.idle.Front()
315-
for ok := elem != nil; ok; ok = elem != nil {
316-
warm := elem.Value.(*container.Container)
339+
// Index to track the position of kept elements
340+
// Basically, since we now have slices, we want to avoid modifying the slice length while we're iterating
341+
// over it. So, we keep track of the element (containers) we want to keep, then we move them to the front of
342+
// the slice, and finally we cut the slice.
343+
// E.g.: if we have 8 containers, but 3 of them are expired, we cycle over the 8 containers, we put the 5 we need
344+
// to keep from the index 0 to 4, and then we cut the slice after the fifth element.
345+
// Finally, we "nil-out" the last 3 elements (in this example), to favor garbage collection.
346+
kept := 0
347+
348+
for _, warm := range pool.idle {
317349
if now > warm.ExpirationTime {
318-
temp := elem
319-
elem = elem.Next()
320-
pool.idle.Remove(temp) // remove the expired element
350+
// remove the expired container
321351

352+
// Update resources
322353
memory, _ := container.GetMemoryMB(warm.ID)
323354
LocalResources.warmPoolUsedMem -= memory
355+
356+
// Destroy the actual container
324357
err := container.Destroy(warm.ID)
325358
if err != nil {
326359
log.Printf("Error while destroying container %s: %s\n", warm.ID, err)
327360
}
361+
328362
} else {
329-
elem = elem.Next()
363+
// container is still valid: Keep it
364+
// Rewrite the element at the 'kept' position
365+
pool.idle[kept] = warm
366+
kept++ // position to write the (eventual) next container we need to keep
330367
}
331368
}
369+
370+
// finally, we set as nil the references to the containers we deleted
371+
for i := kept; i < len(pool.idle); i++ {
372+
pool.idle[i] = nil
373+
}
374+
375+
// Reslice to the new length
376+
pool.idle = pool.idle[:kept]
332377
}
333378
}
334379

@@ -343,21 +388,21 @@ func ShutdownWarmContainersFor(f *function.Function) {
343388
return
344389
}
345390

346-
containersToDelete := make([]container.ContainerID, 0)
391+
containersToDelete := make([]container.ContainerID, 0, len(fp.idle)) // we already know how long it'll need to be, so no need for reallocation
347392

348-
elem := fp.idle.Front()
349-
for ok := elem != nil; ok; ok = elem != nil {
350-
warmed := elem.Value.(*container.Container)
351-
temp := elem
352-
elem = elem.Next()
393+
// Iterate over the idle slice directly
394+
for i, warmed := range fp.idle {
353395
log.Printf("Removing container with ID %s\n", warmed.ID)
354-
fp.idle.Remove(temp)
355396

356397
memory, _ := container.GetMemoryMB(warmed.ID)
357398
LocalResources.warmPoolUsedMem -= memory
358399
containersToDelete = append(containersToDelete, warmed.ID)
400+
fp.idle[i] = nil
359401
}
360402

403+
// clear the slice
404+
fp.idle = fp.idle[:0]
405+
361406
go func(contIDs []container.ContainerID) {
362407
for _, contID := range contIDs {
363408
// No need to update available resources here
@@ -382,34 +427,38 @@ func ShutdownAllContainers() {
382427
continue // should not happen
383428
}
384429

385-
for elem := pool.idle.Front(); elem != nil; elem = elem.Next() {
386-
warmed := elem.Value.(*container.Container)
387-
temp := elem
430+
for i, warmed := range pool.idle {
388431
log.Printf("Removing container with ID %s\n", warmed.ID)
389-
pool.idle.Remove(temp)
390432

391433
err := container.Destroy(warmed.ID)
392434
if err != nil {
393435
log.Printf("Error while destroying container %s: %s", warmed.ID, err)
394436
}
395437
LocalResources.warmPoolUsedMem -= functionDescriptor.MemoryMB
438+
439+
// nil to help garbage collection
440+
pool.idle[i] = nil
396441
}
442+
// Reset the idle slice
443+
pool.idle = pool.idle[:0]
397444

398-
for elem := pool.busy.Front(); elem != nil; elem = elem.Next() {
399-
contID := elem.Value.(*container.Container).ID
400-
temp := elem
401-
log.Printf("Removing container with ID %s\n", contID)
402-
pool.idle.Remove(temp)
445+
// now we do the same but for busy containers
446+
for i, busyCont := range pool.busy {
447+
log.Printf("Removing container with ID %s\n", busyCont.ID)
403448

404-
err := container.Destroy(contID)
449+
err := container.Destroy(busyCont.ID)
405450
if err != nil {
406-
log.Printf("failed to destroy container %s: %v\n", contID, err)
451+
log.Printf("failed to destroy container %s: %v\n", busyCont.ID, err)
407452
continue
408453
}
409454

410455
LocalResources.usedCPUs -= functionDescriptor.CPUDemand
411456
LocalResources.busyPoolUsedMem -= functionDescriptor.MemoryMB
457+
458+
pool.busy[i] = nil
412459
}
460+
// Reset the busy slice capacity
461+
pool.busy = pool.busy[:0]
413462
}
414463
}
415464

@@ -419,7 +468,7 @@ func WarmStatus() map[string]int {
419468
defer LocalResources.RUnlock()
420469
warmPool := make(map[string]int)
421470
for funcName, pool := range LocalResources.containerPools {
422-
warmPool[funcName] = pool.idle.Len()
471+
warmPool[funcName] = len(pool.idle)
423472
}
424473

425474
return warmPool

0 commit comments

Comments
 (0)