Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 7 additions & 5 deletions pkg/agentdrain/cluster.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
package agentdrain

import "github.com/github/gh-aw/pkg/logger"
import (
"slices"

"github.com/github/gh-aw/pkg/logger"
)

var clusterLog = logger.New("agentdrain:cluster")

Expand All @@ -21,16 +25,14 @@ func newClusterStore() *clusterStore {
// add creates and stores a new Cluster built from the given template and
// pipeline stage, assigning it the next sequential ID. The template slice is
// cloned so later mutation of the caller's slice cannot affect the stored
// cluster. The new cluster starts with Size 1 and is returned to the caller.
//
// NOTE(review): this span of the pasted diff contained both the pre- and
// post-change lines (duplicate Template field and duplicate log call); this
// is the reconstructed post-merge version shown by the diff's added lines.
func (s *clusterStore) add(template []string, stage string) *Cluster {
	id := s.nextID
	s.nextID++
	c := &Cluster{
		ID:       id,
		Template: slices.Clone(template),
		Size:     1,
		Stage:    stage,
	}
	s.clusters[id] = c
	clusterLog.Printf("Created new cluster: id=%d, stage=%s, template_length=%d", id, stage, len(c.Template))
	return c
}

Expand Down
10 changes: 4 additions & 6 deletions pkg/agentdrain/mask.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"strings"

"github.com/github/gh-aw/pkg/logger"
"github.com/github/gh-aw/pkg/sliceutil"
)

var maskLog = logger.New("agentdrain:mask")
Expand Down Expand Up @@ -63,12 +64,9 @@ func FlattenEvent(evt AgentEvent, excludeFields []string) string {
excluded[f] = true
}

keys := make([]string, 0, len(evt.Fields))
for k := range evt.Fields {
if !excluded[k] {
keys = append(keys, k)
}
}
keys := sliceutil.FilterMapKeys(evt.Fields, func(k string, _ string) bool {
return !excluded[k]
})
sort.Strings(keys)

parts := make([]string, 0, len(keys)+1)
Expand Down
14 changes: 6 additions & 8 deletions pkg/agentdrain/persist.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package agentdrain
import (
"encoding/json"
"fmt"
"slices"

"github.com/github/gh-aw/pkg/logger"
)
Expand Down Expand Up @@ -31,15 +32,14 @@ func (m *Miner) SaveJSON() ([]byte, error) {

persistLog.Printf("Saving miner state: clusters=%d", len(m.store.clusters))
snap := Snapshot{
Config: m.cfg,
NextID: m.store.nextID,
Config: m.cfg,
NextID: m.store.nextID,
Clusters: make([]SnapshotCluster, 0, len(m.store.clusters)),
}
Comment on lines +35 to 38
Copy link

Copilot AI Apr 14, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

SaveJSON now initializes Snapshot.Clusters with an empty (non-nil) slice. When there are zero clusters this changes the serialized JSON from "clusters": null (previous behavior) to "clusters": [], which is an observable wire-format change (and may diverge from existing persisted snapshots / pkg/agentdrain/data/default_weights.json that currently contains clusters: null for some stages). If compatibility is important, keep Clusters nil when len(m.store.clusters)==0 (only preallocate when non-empty), or explicitly decide on the new format and update the PR description + any committed snapshot data accordingly.

Suggested change
Config: m.cfg,
NextID: m.store.nextID,
Clusters: make([]SnapshotCluster, 0, len(m.store.clusters)),
}
Config: m.cfg,
NextID: m.store.nextID,
}
if len(m.store.clusters) > 0 {
snap.Clusters = make([]SnapshotCluster, 0, len(m.store.clusters))
}

Copilot uses AI. Check for mistakes.
for _, c := range m.store.clusters {
tmpl := make([]string, len(c.Template))
copy(tmpl, c.Template)
snap.Clusters = append(snap.Clusters, SnapshotCluster{
ID: c.ID,
Template: tmpl,
Template: slices.Clone(c.Template),
Size: c.Size,
Stage: c.Stage,
})
Expand Down Expand Up @@ -71,11 +71,9 @@ func (m *Miner) LoadJSON(data []byte) error {
m.store.nextID = snap.NextID

for _, sc := range snap.Clusters {
tmpl := make([]string, len(sc.Template))
copy(tmpl, sc.Template)
c := &Cluster{
ID: sc.ID,
Template: tmpl,
Template: slices.Clone(sc.Template),
Size: sc.Size,
Stage: sc.Stage,
}
Expand Down
Loading