Skip to content

Commit b290c01

Browse files
committed
[kernel] Rewrite rt_thread_get_usage to use incremental statistics based on sampling windows.
1 parent 012e301 commit b290c01

File tree

4 files changed

+174
-28
lines changed

4 files changed

+174
-28
lines changed

include/rtdef.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -935,6 +935,8 @@ struct rt_thread
935935
#ifdef RT_USING_CPU_USAGE_TRACER
936936
rt_ubase_t user_time; /**< Ticks on user */
937937
rt_ubase_t system_time; /**< Ticks on system */
938+
rt_ubase_t total_time_prev; /**< Previous total ticks snapshot */
939+
rt_uint8_t cpu_usage; /**< Recent CPU usage in percent */
938940
#endif /* RT_USING_CPU_USAGE_TRACER */
939941

940942
#ifdef RT_USING_MEM_PROTECTION

src/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,17 @@ config RT_USING_CPU_USAGE_TRACER
199199
percentage information through the list thread command.
200200
It will automatically integrate with the scheduler to track thread execution time.
201201

202+
if RT_USING_CPU_USAGE_TRACER
203+
config RT_CPU_USAGE_CALC_INTERVAL_MS
204+
int "CPU usage sampling interval (ms)"
205+
default 200
206+
range 50 5000
207+
help
208+
Sampling window for thread CPU usage display.
209+
A shorter interval updates faster but fluctuates more.
210+
A longer interval is smoother but has higher display latency.
211+
endif
212+
202213
menu "kservice options"
203214
config RT_USING_TINY_FFS
204215
bool "Enable kservice to use tiny finding first bit set method"

src/kservice.c

Lines changed: 154 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2006-2024, RT-Thread Development Team
2+
* Copyright (c) 2006-2026, RT-Thread Development Team
33
*
44
* SPDX-License-Identifier: Apache-2.0
55
*
@@ -28,6 +28,7 @@
2828
* 2023-10-21 Shell support the common backtrace API which is arch-independent
2929
* 2023-12-10 xqyjlj perf rt_hw_interrupt_disable/enable, fix memheap lock
3030
* 2024-03-10 Meco Man move std libc related functions to rtklibc
31+
* 2026-03-16 Rbb666 Change rt_thread_get_usage to incremental statistics.
3132
*/
3233

3334
#include <rtthread.h>
@@ -572,49 +573,174 @@ rt_err_t rt_backtrace_thread(rt_thread_t thread)
572573
}
573574

574575
#ifdef RT_USING_CPU_USAGE_TRACER
576+
577+
#define RT_CPU_USAGE_CALC_INTERVAL_TICK \
578+
((RT_TICK_PER_SECOND * RT_CPU_USAGE_CALC_INTERVAL_MS + 999U) / 1000U)
579+
580+
static rt_tick_t _cpu_usage_sample_tick;
581+
static rt_bool_t _cpu_usage_inited = RT_FALSE;
582+
static struct rt_cpu_usage_stats _cpu_usage_prev_cpu_stat[RT_CPUS_NR];
583+
static struct rt_spinlock _cpu_usage_lock = RT_SPINLOCK_INIT;
584+
585+
/*
586+
* Calculate total CPU-time delta for this sampling window and
587+
* refresh per-CPU snapshots.
588+
*
589+
* Each counter delta is computed in rt_ubase_t width first, so wrap-around
590+
* on 32-bit targets is handled naturally by unsigned arithmetic.
591+
*/
592+
static rt_uint64_t _cpu_usage_calc_total_delta(void)
593+
{
594+
rt_uint64_t total_delta = 0;
595+
int i;
596+
597+
for (i = 0; i < RT_CPUS_NR; i++)
598+
{
599+
rt_cpu_t pcpu = rt_cpu_index(i);
600+
rt_ubase_t user_now = pcpu->cpu_stat.user;
601+
rt_ubase_t system_now = pcpu->cpu_stat.system;
602+
rt_ubase_t idle_now = pcpu->cpu_stat.idle;
603+
604+
/* Per-counter delta first to avoid overflow artifacts after sum. */
605+
rt_ubase_t user_delta = (rt_ubase_t)(user_now - _cpu_usage_prev_cpu_stat[i].user);
606+
rt_ubase_t system_delta = (rt_ubase_t)(system_now - _cpu_usage_prev_cpu_stat[i].system);
607+
rt_ubase_t idle_delta = (rt_ubase_t)(idle_now - _cpu_usage_prev_cpu_stat[i].idle);
608+
609+
total_delta += (rt_uint64_t)user_delta;
610+
total_delta += (rt_uint64_t)system_delta;
611+
total_delta += (rt_uint64_t)idle_delta;
612+
613+
_cpu_usage_prev_cpu_stat[i].user = user_now;
614+
_cpu_usage_prev_cpu_stat[i].system = system_now;
615+
_cpu_usage_prev_cpu_stat[i].idle = idle_now;
616+
}
617+
618+
return total_delta;
619+
}
620+
621+
static void _cpu_usage_snapshot_init(void)
622+
{
623+
struct rt_object_information *info;
624+
rt_list_t *list;
625+
rt_list_t *node;
626+
rt_base_t level;
627+
int i;
628+
629+
info = rt_object_get_information(RT_Object_Class_Thread);
630+
list = &info->object_list;
631+
632+
level = rt_spin_lock_irqsave(&info->spinlock);
633+
for (node = list->next; node != list; node = node->next)
634+
{
635+
struct rt_object *obj = rt_list_entry(node, struct rt_object, list);
636+
struct rt_thread *t = (struct rt_thread *)obj;
637+
638+
t->total_time_prev = 0U;
639+
t->cpu_usage = 0U;
640+
}
641+
rt_spin_unlock_irqrestore(&info->spinlock, level);
642+
643+
for (i = 0; i < RT_CPUS_NR; i++)
644+
{
645+
_cpu_usage_prev_cpu_stat[i].user = 0U;
646+
_cpu_usage_prev_cpu_stat[i].system = 0U;
647+
_cpu_usage_prev_cpu_stat[i].idle = 0U;
648+
}
649+
650+
_cpu_usage_sample_tick = rt_tick_get();
651+
_cpu_usage_inited = RT_TRUE;
652+
}
653+
654+
/*
 * Recompute every thread's cached usage percentage from its runtime delta
 * within the window that just closed, and store the new per-thread
 * snapshot for the next window.
 *
 * @param total_delta Sum of all CPUs' user/system/idle deltas for the
 *                    window; when zero, every thread's usage reads as 0.
 */
static void _cpu_usage_refresh_threads(rt_uint64_t total_delta)
{
    struct rt_object_information *info;
    rt_list_t *head;
    rt_list_t *iter;
    rt_base_t level;

    info = rt_object_get_information(RT_Object_Class_Thread);
    head = &info->object_list;

    level = rt_spin_lock_irqsave(&info->spinlock);
    for (iter = head->next; iter != head; iter = iter->next)
    {
        struct rt_thread *thread =
            (struct rt_thread *)rt_list_entry(iter, struct rt_object, list);
        /* Cumulative runtime now; unsigned subtraction absorbs wrap-around. */
        rt_ubase_t run_total = (rt_ubase_t)(thread->user_time + thread->system_time);
        rt_ubase_t run_delta = (rt_ubase_t)(run_total - thread->total_time_prev);

        if (total_delta > 0U)
        {
            rt_uint64_t percent = ((rt_uint64_t)run_delta * 100U) / total_delta;

            /* Clamp: accounting jitter could push the ratio past 100. */
            thread->cpu_usage = (rt_uint8_t)(percent > 100U ? 100U : percent);
        }
        else
        {
            thread->cpu_usage = 0U;
        }

        thread->total_time_prev = run_total;
    }
    rt_spin_unlock_irqrestore(&info->spinlock, level);
}
687+
688+
static void _cpu_usage_update(void)
689+
{
690+
rt_tick_t tick_now;
691+
rt_tick_t delta_tick;
692+
rt_uint64_t total_delta;
693+
rt_bool_t bypass_interval_check = RT_FALSE;
694+
695+
if (!_cpu_usage_inited)
696+
{
697+
_cpu_usage_snapshot_init();
698+
bypass_interval_check = RT_TRUE;
699+
}
700+
701+
tick_now = rt_tick_get();
702+
delta_tick = rt_tick_get_delta(_cpu_usage_sample_tick);
703+
if (!bypass_interval_check && delta_tick < RT_CPU_USAGE_CALC_INTERVAL_TICK)
704+
{
705+
return;
706+
}
707+
708+
total_delta = _cpu_usage_calc_total_delta();
709+
_cpu_usage_refresh_threads(total_delta);
710+
_cpu_usage_sample_tick = tick_now;
711+
}
712+
575713
/**
576-
* @brief Get thread usage percentage relative to total system CPU time
714+
* @brief Get thread CPU usage percentage in the recent sampling window
577715
*
578-
* This function calculates the CPU usage percentage of a specific thread
579-
* relative to the total CPU time consumed by all threads in the system.
716+
* This function returns per-thread CPU usage based on delta runtime in the
717+
* latest sampling window, rather than cumulative runtime since boot.
580718
*
581719
* @param thread Pointer to the thread object. Must not be NULL.
582720
*
583721
* @return The CPU usage percentage as an integer value (0-100).
584-
* Returns 0 if total system time is 0 or if CPU usage tracing is not enabled.
722+
* If sampling interval has not elapsed yet, the previous cached value
723+
* is returned (initial value is 0).
585724
*
586725
* @note This function requires RT_USING_CPU_USAGE_TRACER to be enabled.
587-
* @note The percentage is calculated as: (thread_time * 100) / total_system_time
588-
* @note Due to integer arithmetic, the result is truncated and may not sum
589-
* to exactly 100% across all threads due to rounding.
726+
* @note The percentage is calculated as
727+
* (thread_time_delta * 100) / total_time_delta,
728+
* where total_time_delta is the sum of user/system/idle deltas of all CPUs.
729+
* @note Sampling interval can be tuned with RT_CPU_USAGE_CALC_INTERVAL_MS.
590730
* @note If thread is NULL, an assertion will be triggered in debug builds.
591731
*/
592732
rt_uint8_t rt_thread_get_usage(rt_thread_t thread)
593733
{
594-
rt_ubase_t thread_time;
595-
rt_ubase_t total_time = 0U;
596-
int i;
597-
rt_cpu_t pcpu;
734+
rt_uint8_t usage;
598735

599736
RT_ASSERT(thread != RT_NULL);
600737

601-
thread_time = thread->user_time + thread->system_time;
602-
603-
/* Calculate total system time by summing all CPUs' time */
604-
for (i = 0; i < RT_CPUS_NR; i++)
605-
{
606-
pcpu = rt_cpu_index(i);
607-
total_time += pcpu->cpu_stat.user + pcpu->cpu_stat.system + pcpu->cpu_stat.idle;
608-
}
609-
610-
if (total_time > 0U)
611-
{
612-
/* Calculate thread usage percentage: (thread_time * 100) / total_time */
613-
rt_ubase_t usage = (thread_time * 100U) / total_time;
614-
return (rt_uint8_t)(usage > 100U ? 100U : usage);
615-
}
738+
rt_spin_lock(&_cpu_usage_lock);
739+
_cpu_usage_update();
740+
usage = thread->cpu_usage;
741+
rt_spin_unlock(&_cpu_usage_lock);
616742

617-
return 0U;
743+
return usage;
618744
}
619745
#endif /* RT_USING_CPU_USAGE_TRACER */
620746

src/thread.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -277,6 +277,13 @@ static rt_err_t _thread_init(struct rt_thread *thread,
277277
thread->system_time = 0;
278278
#endif
279279

280+
#ifdef RT_USING_CPU_USAGE_TRACER
281+
thread->user_time = 0;
282+
thread->system_time = 0;
283+
thread->total_time_prev = 0;
284+
thread->cpu_usage = 0;
285+
#endif /* RT_USING_CPU_USAGE_TRACER */
286+
280287
#ifdef RT_USING_PTHREADS
281288
thread->pthread_data = RT_NULL;
282289
#endif /* RT_USING_PTHREADS */

0 commit comments

Comments
 (0)