|
1 | 1 | /* |
2 | | - * Copyright (c) 2006-2024, RT-Thread Development Team |
| 2 | + * Copyright (c) 2006-2026, RT-Thread Development Team |
3 | 3 | * |
4 | 4 | * SPDX-License-Identifier: Apache-2.0 |
5 | 5 | * |
|
28 | 28 | * 2023-10-21 Shell support the common backtrace API which is arch-independent |
29 | 29 | * 2023-12-10 xqyjlj perf rt_hw_interrupt_disable/enable, fix memheap lock |
30 | 30 | * 2024-03-10 Meco Man move std libc related functions to rtklibc |
| 31 | + * 2026-03-16 Rbb666 Change rt_thread_get_usage to incremental statistics. |
31 | 32 | */ |
32 | 33 |
|
33 | 34 | #include <rtthread.h> |
@@ -572,49 +573,174 @@ rt_err_t rt_backtrace_thread(rt_thread_t thread) |
572 | 573 | } |
573 | 574 |
|
574 | 575 | #ifdef RT_USING_CPU_USAGE_TRACER |
/* Sampling window length in OS ticks, rounded up from
 * RT_CPU_USAGE_CALC_INTERVAL_MS so a sub-tick interval still maps to >= 1 tick. */
#define RT_CPU_USAGE_CALC_INTERVAL_TICK \
    ((RT_TICK_PER_SECOND * RT_CPU_USAGE_CALC_INTERVAL_MS + 999U) / 1000U)

/* Tick at which the current sampling window started. */
static rt_tick_t _cpu_usage_sample_tick;
/* RT_TRUE once the first snapshot has been taken (lazy initialization). */
static rt_bool_t _cpu_usage_inited = RT_FALSE;
/* Per-CPU user/system/idle counters captured at the previous sample point. */
static struct rt_cpu_usage_stats _cpu_usage_prev_cpu_stat[RT_CPUS_NR];
/* Serializes the sampling state above across concurrent callers. */
static struct rt_spinlock _cpu_usage_lock = RT_SPINLOCK_INIT;
| 585 | +/* |
| 586 | + * Calculate total CPU-time delta for this sampling window and |
| 587 | + * refresh per-CPU snapshots. |
| 588 | + * |
| 589 | + * Each counter delta is computed in rt_ubase_t width first, so wrap-around |
| 590 | + * on 32-bit targets is handled naturally by unsigned arithmetic. |
| 591 | + */ |
| 592 | +static rt_uint64_t _cpu_usage_calc_total_delta(void) |
| 593 | +{ |
| 594 | + rt_uint64_t total_delta = 0; |
| 595 | + int i; |
| 596 | + |
| 597 | + for (i = 0; i < RT_CPUS_NR; i++) |
| 598 | + { |
| 599 | + rt_cpu_t pcpu = rt_cpu_index(i); |
| 600 | + rt_ubase_t user_now = pcpu->cpu_stat.user; |
| 601 | + rt_ubase_t system_now = pcpu->cpu_stat.system; |
| 602 | + rt_ubase_t idle_now = pcpu->cpu_stat.idle; |
| 603 | + |
| 604 | + /* Per-counter delta first to avoid overflow artifacts after sum. */ |
| 605 | + rt_ubase_t user_delta = (rt_ubase_t)(user_now - _cpu_usage_prev_cpu_stat[i].user); |
| 606 | + rt_ubase_t system_delta = (rt_ubase_t)(system_now - _cpu_usage_prev_cpu_stat[i].system); |
| 607 | + rt_ubase_t idle_delta = (rt_ubase_t)(idle_now - _cpu_usage_prev_cpu_stat[i].idle); |
| 608 | + |
| 609 | + total_delta += (rt_uint64_t)user_delta; |
| 610 | + total_delta += (rt_uint64_t)system_delta; |
| 611 | + total_delta += (rt_uint64_t)idle_delta; |
| 612 | + |
| 613 | + _cpu_usage_prev_cpu_stat[i].user = user_now; |
| 614 | + _cpu_usage_prev_cpu_stat[i].system = system_now; |
| 615 | + _cpu_usage_prev_cpu_stat[i].idle = idle_now; |
| 616 | + } |
| 617 | + |
| 618 | + return total_delta; |
| 619 | +} |
| 620 | + |
| 621 | +static void _cpu_usage_snapshot_init(void) |
| 622 | +{ |
| 623 | + struct rt_object_information *info; |
| 624 | + rt_list_t *list; |
| 625 | + rt_list_t *node; |
| 626 | + rt_base_t level; |
| 627 | + int i; |
| 628 | + |
| 629 | + info = rt_object_get_information(RT_Object_Class_Thread); |
| 630 | + list = &info->object_list; |
| 631 | + |
| 632 | + level = rt_spin_lock_irqsave(&info->spinlock); |
| 633 | + for (node = list->next; node != list; node = node->next) |
| 634 | + { |
| 635 | + struct rt_object *obj = rt_list_entry(node, struct rt_object, list); |
| 636 | + struct rt_thread *t = (struct rt_thread *)obj; |
| 637 | + |
| 638 | + t->total_time_prev = 0U; |
| 639 | + t->cpu_usage = 0U; |
| 640 | + } |
| 641 | + rt_spin_unlock_irqrestore(&info->spinlock, level); |
| 642 | + |
| 643 | + for (i = 0; i < RT_CPUS_NR; i++) |
| 644 | + { |
| 645 | + _cpu_usage_prev_cpu_stat[i].user = 0U; |
| 646 | + _cpu_usage_prev_cpu_stat[i].system = 0U; |
| 647 | + _cpu_usage_prev_cpu_stat[i].idle = 0U; |
| 648 | + } |
| 649 | + |
| 650 | + _cpu_usage_sample_tick = rt_tick_get(); |
| 651 | + _cpu_usage_inited = RT_TRUE; |
| 652 | +} |
| 653 | + |
| 654 | +static void _cpu_usage_refresh_threads(rt_uint64_t total_delta) |
| 655 | +{ |
| 656 | + struct rt_object_information *info; |
| 657 | + rt_list_t *list; |
| 658 | + rt_list_t *node; |
| 659 | + rt_base_t level; |
| 660 | + |
| 661 | + info = rt_object_get_information(RT_Object_Class_Thread); |
| 662 | + list = &info->object_list; |
| 663 | + |
| 664 | + level = rt_spin_lock_irqsave(&info->spinlock); |
| 665 | + for (node = list->next; node != list; node = node->next) |
| 666 | + { |
| 667 | + struct rt_object *obj = rt_list_entry(node, struct rt_object, list); |
| 668 | + struct rt_thread *t = (struct rt_thread *)obj; |
| 669 | + rt_ubase_t total_now = (rt_ubase_t)(t->user_time + t->system_time); |
| 670 | + rt_ubase_t total_delta_now = (rt_ubase_t)(total_now - t->total_time_prev); |
| 671 | + rt_uint64_t thread_delta = (rt_uint64_t)total_delta_now; |
| 672 | + |
| 673 | + if (total_delta > 0U) |
| 674 | + { |
| 675 | + rt_uint64_t usage = (thread_delta * 100U) / total_delta; |
| 676 | + t->cpu_usage = (rt_uint8_t)(usage > 100U ? 100U : usage); |
| 677 | + } |
| 678 | + else |
| 679 | + { |
| 680 | + t->cpu_usage = 0U; |
| 681 | + } |
| 682 | + |
| 683 | + t->total_time_prev = total_now; |
| 684 | + } |
| 685 | + rt_spin_unlock_irqrestore(&info->spinlock, level); |
| 686 | +} |
| 687 | + |
| 688 | +static void _cpu_usage_update(void) |
| 689 | +{ |
| 690 | + rt_tick_t tick_now; |
| 691 | + rt_tick_t delta_tick; |
| 692 | + rt_uint64_t total_delta; |
| 693 | + rt_bool_t bypass_interval_check = RT_FALSE; |
| 694 | + |
| 695 | + if (!_cpu_usage_inited) |
| 696 | + { |
| 697 | + _cpu_usage_snapshot_init(); |
| 698 | + bypass_interval_check = RT_TRUE; |
| 699 | + } |
| 700 | + |
| 701 | + tick_now = rt_tick_get(); |
| 702 | + delta_tick = rt_tick_get_delta(_cpu_usage_sample_tick); |
| 703 | + if (!bypass_interval_check && delta_tick < RT_CPU_USAGE_CALC_INTERVAL_TICK) |
| 704 | + { |
| 705 | + return; |
| 706 | + } |
| 707 | + |
| 708 | + total_delta = _cpu_usage_calc_total_delta(); |
| 709 | + _cpu_usage_refresh_threads(total_delta); |
| 710 | + _cpu_usage_sample_tick = tick_now; |
| 711 | +} |
| 712 | + |
575 | 713 | /** |
576 | | - * @brief Get thread usage percentage relative to total system CPU time |
| 714 | + * @brief Get thread CPU usage percentage in the recent sampling window |
577 | 715 | * |
578 | | - * This function calculates the CPU usage percentage of a specific thread |
579 | | - * relative to the total CPU time consumed by all threads in the system. |
| 716 | + * This function returns per-thread CPU usage based on delta runtime in the |
| 717 | + * latest sampling window, rather than cumulative runtime since boot. |
580 | 718 | * |
581 | 719 | * @param thread Pointer to the thread object. Must not be NULL. |
582 | 720 | * |
583 | 721 | * @return The CPU usage percentage as an integer value (0-100). |
584 | | - * Returns 0 if total system time is 0 or if CPU usage tracing is not enabled. |
| 722 | + * If sampling interval has not elapsed yet, the previous cached value |
| 723 | + * is returned (initial value is 0). |
585 | 724 | * |
586 | 725 | * @note This function requires RT_USING_CPU_USAGE_TRACER to be enabled. |
587 | | - * @note The percentage is calculated as: (thread_time * 100) / total_system_time |
588 | | - * @note Due to integer arithmetic, the result is truncated and may not sum |
589 | | - * to exactly 100% across all threads due to rounding. |
| 726 | + * @note The percentage is calculated as |
| 727 | + * (thread_time_delta * 100) / total_time_delta, |
| 728 | + * where total_time_delta is the sum of user/system/idle deltas of all CPUs. |
| 729 | + * @note Sampling interval can be tuned with RT_CPU_USAGE_CALC_INTERVAL_MS. |
590 | 730 | * @note If thread is NULL, an assertion will be triggered in debug builds. |
591 | 731 | */ |
592 | 732 | rt_uint8_t rt_thread_get_usage(rt_thread_t thread) |
593 | 733 | { |
594 | | - rt_ubase_t thread_time; |
595 | | - rt_ubase_t total_time = 0U; |
596 | | - int i; |
597 | | - rt_cpu_t pcpu; |
| 734 | + rt_uint8_t usage; |
598 | 735 |
|
599 | 736 | RT_ASSERT(thread != RT_NULL); |
600 | 737 |
|
601 | | - thread_time = thread->user_time + thread->system_time; |
602 | | - |
603 | | - /* Calculate total system time by summing all CPUs' time */ |
604 | | - for (i = 0; i < RT_CPUS_NR; i++) |
605 | | - { |
606 | | - pcpu = rt_cpu_index(i); |
607 | | - total_time += pcpu->cpu_stat.user + pcpu->cpu_stat.system + pcpu->cpu_stat.idle; |
608 | | - } |
609 | | - |
610 | | - if (total_time > 0U) |
611 | | - { |
612 | | - /* Calculate thread usage percentage: (thread_time * 100) / total_time */ |
613 | | - rt_ubase_t usage = (thread_time * 100U) / total_time; |
614 | | - return (rt_uint8_t)(usage > 100U ? 100U : usage); |
615 | | - } |
| 738 | + rt_spin_lock(&_cpu_usage_lock); |
| 739 | + _cpu_usage_update(); |
| 740 | + usage = thread->cpu_usage; |
| 741 | + rt_spin_unlock(&_cpu_usage_lock); |
616 | 742 |
|
617 | | - return 0U; |
| 743 | + return usage; |
618 | 744 | } |
619 | 745 | #endif /* RT_USING_CPU_USAGE_TRACER */ |
620 | 746 |
|
|
0 commit comments