Skip to content

Commit 6be949f

Browse files
added slog and shifted to structured logging. Also cleaned up the (#7)
example manifest
1 parent df7cea2 commit 6be949f

File tree

4 files changed

+100
-43
lines changed

4 files changed

+100
-43
lines changed

cmd/deployment-tracker/main.go

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ package main
33
import (
44
"context"
55
"flag"
6-
"fmt"
6+
"log/slog"
77
"os"
88
"os/signal"
99
"syscall"
@@ -38,38 +38,44 @@ func main() {
3838
flag.IntVar(&workers, "workers", 2, "number of worker goroutines")
3939
flag.Parse()
4040

41+
// init logging
42+
opts := slog.HandlerOptions{Level: slog.LevelInfo}
43+
slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, &opts)))
44+
4145
var cntrlCfg = controller.Config{
4246
Template: getEnvOrDefault("DN_TEMPLATE", defaultTemplate),
4347
LogicalEnvironment: os.Getenv("LOGICAL_ENVIRONMENT"),
4448
PhysicalEnvironment: os.Getenv("PHYSICAL_ENVIRONMENT"),
4549
Cluster: os.Getenv("CLUSTER"),
4650
APIToken: getEnvOrDefault("API_TOKEN", ""),
4751
BaseURL: getEnvOrDefault("BASE_URL", "api.github.com"),
48-
Org: os.Getenv("ORG"),
52+
Organization: os.Getenv("GITHUB_ORG"),
4953
}
5054

5155
if cntrlCfg.LogicalEnvironment == "" {
52-
fmt.Fprint(os.Stderr, "Logical environment is required\n")
56+
slog.Error("Logical environment is required")
5357
os.Exit(1)
5458
}
5559
if cntrlCfg.Cluster == "" {
56-
fmt.Fprint(os.Stderr, "Cluster is required\n")
60+
slog.Error("Cluster is required")
5761
os.Exit(1)
5862
}
59-
if cntrlCfg.Org == "" {
60-
fmt.Fprint(os.Stderr, "Org is required\n")
63+
if cntrlCfg.Organization == "" {
64+
slog.Error("Organization is required")
6165
os.Exit(1)
6266
}
6367

6468
k8sCfg, err := createK8sConfig(kubeconfig)
6569
if err != nil {
66-
fmt.Fprintf(os.Stderr, "Error creating Kubernetes config: %v\n", err)
70+
slog.Error("Failed to create Kubernetes config",
71+
"error", err)
6772
os.Exit(1)
6873
}
6974

7075
clientset, err := kubernetes.NewForConfig(k8sCfg)
7176
if err != nil {
72-
fmt.Fprintf(os.Stderr, "Error creating Kubernetes client: %v\n", err)
77+
slog.Error("Error creating Kubernetes client",
78+
"error", err)
7379
os.Exit(1)
7480
}
7581

@@ -79,15 +85,16 @@ func main() {
7985
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
8086
go func() {
8187
<-sigCh
82-
fmt.Println("\nShutting down...")
88+
slog.Info("Shutting down...")
8389
cancel()
8490
}()
8591

8692
cntrl := controller.New(clientset, namespace, &cntrlCfg)
8793

88-
fmt.Println("Starting deployment-tracker controller")
94+
slog.Info("Starting deployment-tracker controller")
8995
if err := cntrl.Run(ctx, workers); err != nil {
90-
fmt.Fprintf(os.Stderr, "Error running controller: %v\n", err)
96+
slog.Error("Error running controller",
97+
"error", err)
9198
cancel()
9299
os.Exit(1)
93100
}

deploy/manifest.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ spec:
5555
imagePullPolicy: IfNotPresent
5656
env:
5757
- name: DN_TEMPLATE
58-
value: "{{namespace}}_{{deploymentName}}_{{containerName}}"
59-
- name: ORG
58+
value: "{{namespace}}/{{deploymentName}}/{{containerName}}"
59+
- name: GITHUB_ORG
6060
value: "test-org"
6161
- name: LOGICAL_ENVIRONMENT
6262
value: "staging"

internal/controller/config.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,5 +17,5 @@ type Config struct {
1717
Cluster string
1818
APIToken string
1919
BaseURL string
20-
Org string
20+
Organization string
2121
}

internal/controller/controller.go

Lines changed: 79 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ package controller
33
import (
44
"context"
55
"errors"
6-
"fmt"
6+
"log/slog"
77
"strings"
88
"time"
99

@@ -40,7 +40,9 @@ func New(clientset kubernetes.Interface, namespace string, cfg *Config) *Control
4040
// Create informer factory
4141
var factory informers.SharedInformerFactory
4242
if namespace == "" {
43-
factory = informers.NewSharedInformerFactory(clientset, 30*time.Second)
43+
factory = informers.NewSharedInformerFactory(clientset,
44+
30*time.Second,
45+
)
4446
} else {
4547
factory = informers.NewSharedInformerFactoryWithOptions(
4648
clientset,
@@ -61,7 +63,11 @@ func New(clientset kubernetes.Interface, namespace string, cfg *Config) *Control
6163
if cfg.APIToken != "" {
6264
clientOpts = append(clientOpts, deploymentrecord.WithAPIToken(cfg.APIToken))
6365
}
64-
apiClient := deploymentrecord.NewClient(cfg.BaseURL, cfg.Org, clientOpts...)
66+
apiClient := deploymentrecord.NewClient(
67+
cfg.BaseURL,
68+
cfg.Organization,
69+
clientOpts...,
70+
)
6571

6672
cntrl := &Controller{
6773
clientset: clientset,
@@ -76,30 +82,41 @@ func New(clientset kubernetes.Interface, namespace string, cfg *Config) *Control
7682
AddFunc: func(obj any) {
7783
pod, ok := obj.(*corev1.Pod)
7884
if !ok {
79-
fmt.Printf("error: invalid object returned: %+v\n",
80-
obj)
85+
slog.Error("Invalid object returned",
86+
"object", obj,
87+
)
8188
return
8289
}
8390

8491
// Only process pods that are running
8592
if pod.Status.Phase == corev1.PodRunning {
8693
key, err := cache.MetaNamespaceKeyFunc(obj)
94+
95+
// For our purposes, there are in practice
96+
// no error events we care about, so don't
97+
// bother with handling it.
8798
if err == nil {
88-
queue.Add(PodEvent{Key: key, EventType: "CREATED", Pod: pod.DeepCopy()})
99+
queue.Add(PodEvent{
100+
Key: key,
101+
EventType: "CREATED",
102+
Pod: pod.DeepCopy(),
103+
})
89104
}
90105
}
91106
},
92107
UpdateFunc: func(oldObj, newObj any) {
93108
oldPod, ok := oldObj.(*corev1.Pod)
94109
if !ok {
95-
fmt.Printf("error: invalid object returned: %+v\n",
96-
oldObj)
110+
slog.Error("Invalid old object returned",
111+
"object", oldObj,
112+
)
97113
return
98114
}
99115
newPod, ok := newObj.(*corev1.Pod)
100116
if !ok {
101-
fmt.Printf("error: invalid object returned: %+v\n",
102-
newObj)
117+
slog.Error("Invalid new object returned",
118+
"object", newObj,
119+
)
103120
return
104121
}
105122

@@ -109,10 +126,19 @@ func New(clientset kubernetes.Interface, namespace string, cfg *Config) *Control
109126
}
110127

111128
// Only process if pod just became running
112-
if oldPod.Status.Phase != corev1.PodRunning && newPod.Status.Phase == corev1.PodRunning {
129+
if oldPod.Status.Phase != corev1.PodRunning &&
130+
newPod.Status.Phase == corev1.PodRunning {
113131
key, err := cache.MetaNamespaceKeyFunc(newObj)
132+
133+
// For our purposes, there are in practice
134+
// no error events we care about, so don't
135+
// bother with handling it.
114136
if err == nil {
115-
queue.Add(PodEvent{Key: key, EventType: "CREATED", Pod: newPod.DeepCopy()})
137+
queue.Add(PodEvent{
138+
Key: key,
139+
EventType: "CREATED",
140+
Pod: newPod.DeepCopy(),
141+
})
116142
}
117143
}
118144
},
@@ -130,14 +156,23 @@ func New(clientset kubernetes.Interface, namespace string, cfg *Config) *Control
130156
}
131157
}
132158
key, err := cache.MetaNamespaceKeyFunc(obj)
159+
160+
// For our purposes, there are in practice
161+
// no error events we care about, so don't
162+
// bother with handling it.
133163
if err == nil {
134-
queue.Add(PodEvent{Key: key, EventType: "DELETED", Pod: pod.DeepCopy()})
164+
queue.Add(PodEvent{
165+
Key: key,
166+
EventType: "DELETED",
167+
Pod: pod.DeepCopy(),
168+
})
135169
}
136170
},
137171
})
138172

139173
if err != nil {
140-
fmt.Printf("ERROR: failed to add event handlers: %s\n", err)
174+
slog.Error("Failed to add event handlers",
175+
"error", err)
141176
}
142177

143178
return cntrl
@@ -148,28 +183,30 @@ func (c *Controller) Run(ctx context.Context, workers int) error {
148183
defer runtime.HandleCrash()
149184
defer c.workqueue.ShutDown()
150185

151-
fmt.Println("Starting pod informer")
186+
slog.Info("Starting pod informer")
152187

153188
// Start the informer
154189
go c.podInformer.Run(ctx.Done())
155190

156191
// Wait for the cache to be synced
157-
fmt.Println("Waiting for informer cache to sync")
192+
slog.Info("Waiting for informer cache to sync")
158193
if !cache.WaitForCacheSync(ctx.Done(), c.podInformer.HasSynced) {
159194
return errors.New("timed out waiting for caches to sync")
160195
}
161196

162-
fmt.Printf("Starting %d workers\n", workers)
197+
slog.Info("Starting workers",
198+
"count", workers,
199+
)
163200

164201
// Start workers
165202
for i := 0; i < workers; i++ {
166203
go wait.UntilWithContext(ctx, c.runWorker, time.Second)
167204
}
168205

169-
fmt.Println("Controller started")
206+
slog.Info("Controller started")
170207

171208
<-ctx.Done()
172-
fmt.Println("Shutting down workers")
209+
slog.Info("Shutting down workers")
173210

174211
return nil
175212
}
@@ -195,16 +232,17 @@ func (c *Controller) processNextItem(ctx context.Context) bool {
195232
}
196233

197234
// Requeue on error with rate limiting
198-
fmt.Printf("Error processing %s: %v, requeuing\n", event.Key, err)
235+
slog.Error("Failed to process event, requeuing",
236+
"event_key", event.Key,
237+
"error", err,
238+
)
199239
c.workqueue.AddRateLimited(event)
200240

201241
return true
202242
}
203243

204244
// processEvent processes a single pod event.
205245
func (c *Controller) processEvent(ctx context.Context, event PodEvent) error {
206-
timestamp := time.Now().Format(time.RFC3339)
207-
208246
pod := event.Pod
209247
if pod == nil {
210248
return nil
@@ -219,14 +257,14 @@ func (c *Controller) processEvent(ctx context.Context, event PodEvent) error {
219257

220258
// Record info for each container in the pod
221259
for _, container := range pod.Spec.Containers {
222-
if err := c.recordContainer(ctx, pod, container, status, event.EventType, timestamp); err != nil {
260+
if err := c.recordContainer(ctx, pod, container, status, event.EventType); err != nil {
223261
lastErr = err
224262
}
225263
}
226264

227265
// Also record init containers
228266
for _, container := range pod.Spec.InitContainers {
229-
if err := c.recordContainer(ctx, pod, container, status, event.EventType, timestamp); err != nil {
267+
if err := c.recordContainer(ctx, pod, container, status, event.EventType); err != nil {
230268
lastErr = err
231269
}
232270
}
@@ -235,7 +273,7 @@ func (c *Controller) processEvent(ctx context.Context, event PodEvent) error {
235273
}
236274

237275
// recordContainer records a single container's deployment info.
238-
func (c *Controller) recordContainer(ctx context.Context, pod *corev1.Pod, container corev1.Container, status, eventType, timestamp string) error {
276+
func (c *Controller) recordContainer(ctx context.Context, pod *corev1.Pod, container corev1.Container, status, eventType string) error {
239277
dn := getARDeploymentName(pod, container, c.cfg.Template)
240278
digest := getContainerDigest(pod, container.Name)
241279

@@ -259,13 +297,25 @@ func (c *Controller) recordContainer(ctx context.Context, pod *corev1.Pod, conta
259297
)
260298

261299
if err := c.apiClient.PostOne(ctx, record); err != nil {
262-
fmt.Printf("[%s] FAILED %s name=%s deployment_name=%s error=%v\n",
263-
timestamp, eventType, record.Name, record.DeploymentName, err)
300+
slog.Error("Failed to post record",
301+
"event_type", eventType,
302+
"name", record.Name,
303+
"deployment_name", record.DeploymentName,
304+
"status", record.Status,
305+
"digest", record.Digest,
306+
"error", err,
307+
)
264308
return err
265309
}
266310

267-
fmt.Printf("[%s] OK %s name=%s deployment_name=%s digest=%s status=%s\n",
268-
timestamp, eventType, record.Name, record.DeploymentName, record.Digest, record.Status)
311+
slog.Info("Posted record",
312+
"event_type", eventType,
313+
"name", record.Name,
314+
"deployment_name", record.DeploymentName,
315+
"status", record.Status,
316+
"digest", record.Digest,
317+
)
318+
269319
return nil
270320
}
271321

0 commit comments

Comments
 (0)