Skip to content
This repository was archived by the owner on Aug 7, 2025. It is now read-only.

Commit 49dc519

Browse files
committed
scaling: add used memory to PDF report table
Currently the PDF report shows how much scaling up consumes free memory. This number is not comparable between cluster nodes or even test runs, because RAM used for OS caches/buffers/slab is counted as consumed. As a consequence, consumed free memory depends heavily on the initial memory conditions of a node, instead of on the memory actually used by k8s and the pods. This patch adds "memory used" to the report in order to provide a less node-dependent and more reproducible memory figure. Using /proc/meminfo "MemAvailable" was also tried, but it varies almost as much as the "MemFree" value that is currently reported. Signed-off-by: Antti Kervinen <antti.kervinen@intel.com>
1 parent a0ca2a2 commit 49dc519

1 file changed

Lines changed: 38 additions & 12 deletions

File tree

metrics/report/report_dockerfile/collectd_scaling.R

Lines changed: 38 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ testnames=c(
2020
podbootdata=c() # Track per-launch data
2121
cpuidledata=c() # Track cpu idle data per nodes
2222
memfreedata=c() # Track mem free data for nodes
23+
memuseddata=c() # Track mem used data for nodes
2324
inodefreedata=c() # Track inode free data for nodes
2425
ifpacketdata=c() # Track interface packet data for nodes
2526
ifoctetdata=c() # Track interface octets data for nodes
@@ -104,11 +105,13 @@ for (currentdir in resultdirs) {
104105
# Get a list of all the nodes from the schedule data list
105106
nodes=names(node_sched_data)
106107

107-
memtotal=0
108+
memfreedelta_total=0
109+
memuseddelta_total=0
108110
cputotal=0
109111
inodetotal=0
110112
cpu_idle_data=c()
111113
mem_free_data=c()
114+
mem_used_data=c()
112115
inode_free_data=c()
113116
interface_packets_data=c()
114117
interface_octets_data=c()
@@ -127,7 +130,7 @@ for (currentdir in resultdirs) {
127130
localhost_dir=paste(node_dir, "localhost", sep="/")
128131

129132
# grab memory data
130-
memory_dir=paste(localhost_dir, "memory", sep="/")
133+
memory_dir=paste(localhost_dir, "memory", sep="/")
131134
# filename has date on the end, so look for the right file name
132135
freemem_pattern='^memory\\-free'
133136
files=list.files(memory_dir, pattern=freemem_pattern)
@@ -136,13 +139,26 @@ for (currentdir in resultdirs) {
136139
mem_free_csv=paste(memory_dir, file, sep="/")
137140
node_mem_free_data=read.csv(mem_free_csv, header=TRUE, sep=",")
138141
node_mem_free_data=cbind(node_mem_free_data,
139-
node=rep(n, length(node_mem_free_data$value)))
142+
node=rep(n, length(node_mem_free_data$value)))
140143
node_mem_free_data=cbind(node_mem_free_data,
141-
testname=rep(testname, length(node_mem_free_data$value)))
144+
testname=rep(testname, length(node_mem_free_data$value)))
142145
node_mem_free_data$s_offset = node_mem_free_data$epoch - local_bootdata[1,]$epoch
143-
144146
mem_free_data=rbind(mem_free_data, node_mem_free_data)
145147
}
148+
# filename has date on the end, so look for the right file name
149+
usedmem_pattern='^memory\\-used'
150+
files=list.files(memory_dir, pattern=usedmem_pattern)
151+
# collectd csv plugin starts a new file for each day of data collected
152+
for(file in files) {
153+
mem_used_csv=paste(memory_dir, file, sep="/")
154+
node_mem_used_data=read.csv(mem_used_csv, header=TRUE, sep=",")
155+
node_mem_used_data=cbind(node_mem_used_data,
156+
node=rep(n, length(node_mem_used_data$value)))
157+
node_mem_used_data=cbind(node_mem_used_data,
158+
testname=rep(testname, length(node_mem_used_data$value)))
159+
node_mem_used_data$s_offset = node_mem_used_data$epoch - local_bootdata[1,]$epoch
160+
mem_used_data=rbind(mem_used_data, node_mem_used_data)
161+
}
146162

147163
# grab CPU data
148164
cpu_dir=paste(localhost_dir, "aggregation-cpu-average", sep="/")
@@ -273,6 +289,8 @@ for (currentdir in resultdirs) {
273289
end_time=local_bootdata$epoch[length(local_bootdata$epoch)]
274290

275291
# get value closest to first pod launch
292+
# memory-free and memory-used data share exactly the same timestamps,
293+
# so the same start/end indexes work for both.
276294
mem_start_index=Position(function(x) x > start_time, node_mem_free_data$epoch)
277295
# take the reading previous to the index as long as a valid index
278296
if (is.na(mem_start_index)) {
@@ -281,6 +299,7 @@ for (currentdir in resultdirs) {
281299
mem_start_index = mem_start_index - 1
282300
}
283301
max_free_mem=node_mem_free_data$value[mem_start_index]
302+
min_used_mem=node_mem_used_data$value[mem_start_index]
284303

285304
# get value closest to last pod launch
286305
mem_end_index=Position(function(x) x > end_time, node_mem_free_data$epoch)
@@ -291,8 +310,10 @@ for (currentdir in resultdirs) {
291310
mem_end_index = mem_end_index - 1
292311
}
293312
min_free_mem=node_mem_free_data$value[mem_end_index]
313+
max_used_mem=node_mem_used_data$value[mem_end_index]
294314

295-
memtotal = memtotal + (max_free_mem - min_free_mem)
315+
memfreedelta_total = memfreedelta_total + (max_free_mem - min_free_mem)
316+
memuseddelta_total = memuseddelta_total + (max_used_mem - min_used_mem)
296317

297318
# get value closest to first pod launch
298319
cpu_start_index=Position(function(x) x > start_time, node_cpu_idle_data$epoch)
@@ -342,17 +363,21 @@ for (currentdir in resultdirs) {
342363
num_pods = local_bootdata$n_pods[length(local_bootdata$n_pods)]
343364

344365
# We get data in b, but want the graphs in Gb.
345-
memtotal = memtotal / (1024*1024*1024)
346-
gb_per_pod = memtotal/num_pods
347-
pod_per_gb = 1/gb_per_pod
366+
memfreedelta_total = memfreedelta_total / (1024*1024*1024)
367+
memuseddelta_total = memuseddelta_total / (1024*1024*1024)
368+
gb_nonfree_per_pod = memfreedelta_total/num_pods
369+
gb_used_per_pod = memuseddelta_total/num_pods
370+
pod_per_nonfree_gb = 1/gb_nonfree_per_pod
371+
pod_per_used_gb = 1/gb_used_per_pod
348372

349373
# Memory usage stats.
350374
local_mems = c(
351375
"Test"=testname,
352376
"n"=num_pods,
353-
"Tot_Gb"=round(memtotal, 3),
354-
"avg_Gb"=round(gb_per_pod, 4),
355-
"n_per_Gb"=round(pod_per_gb, 2)
377+
"Free_GB_delta"=round(memfreedelta_total, 3),
378+
"Used_GB_delta"=round(memuseddelta_total, 3),
379+
"n_per_nonfree_GB"=round(pod_per_nonfree_gb, 2),
380+
"n_per_used_GB"=round(pod_per_used_gb, 2)
356381
)
357382
memstats=rbind(memstats, local_mems)
358383

@@ -393,6 +418,7 @@ for (currentdir in resultdirs) {
393418
podbootdata=rbind(podbootdata, local_bootdata, make.row.names=FALSE)
394419
cpuidledata=rbind(cpuidledata, cpu_idle_data)
395420
memfreedata=rbind(memfreedata, mem_free_data)
421+
memuseddata=rbind(memuseddata, mem_used_data)
396422
inodefreedata=rbind(inodefreedata, inode_free_data)
397423
ifpacketdata=rbind(ifpacketdata, interface_packets_data)
398424
ifoctetdata=rbind(ifoctetdata, interface_octets_data)

0 commit comments

Comments (0)