Skip to content

Commit 118bfde

Browse files
feat(freertos-smp): Add support for TCB locks
1 parent e850728 commit 118bfde

File tree

2 files changed

+224
-14
lines changed

2 files changed

+224
-14
lines changed

include/FreeRTOS.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,25 @@
373373
#define portUSING_GRANULAR_LOCKS 0
374374
#endif
375375

/* configUSE_TCB_DATA_GROUP_LOCK enables a per-TCB spinlock protecting
 * TCB-specific data such as uxPreemptionDisable. Compared to taking the global
 * kernel lock this reduces lock contention. When enabled:
 * - Every TCB carries its own spinlock (xTCBSpinlock).
 * - vTaskPreemptionDisable/Enable take the TCB lock rather than the kernel lock.
 * - prvYieldCore holds the target TCB's lock while it inspects uxPreemptionDisable.
 * The feature is only valid with granular locks on a multi-core build. */
#ifndef configUSE_TCB_DATA_GROUP_LOCK
    #define configUSE_TCB_DATA_GROUP_LOCK    0
#endif

/* Sanity checks: the TCB data group lock depends on granular locking and is
 * meaningless on a single core build. */
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) )
    #error configUSE_TCB_DATA_GROUP_LOCK requires portUSING_GRANULAR_LOCKS to be enabled
#endif

#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
    #error configUSE_TCB_DATA_GROUP_LOCK is not supported in single core FreeRTOS
#endif
394+
376395
#ifndef configMAX_TASK_NAME_LEN
377396
#define configMAX_TASK_NAME_LEN 16
378397
#endif
@@ -3296,6 +3315,9 @@ typedef struct xSTATIC_TCB
32963315
void * pvDummyDirectTransferBuffer;
32973316
BaseType_t xDummyDirectTransferPosition;
32983317
#endif
3318+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3319+
portSPINLOCK_TYPE xTCBDummySpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
3320+
#endif
32993321
} StaticTask_t;
33003322

33013323
/*

tasks.c

Lines changed: 202 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -350,7 +350,44 @@
350350
/* Yields the given core. This must be called from a critical section and xCoreID
351351
* must be valid. This macro is not required in single core since there is only
352352
* one core to yield. */
353-
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
353+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )

/* With the TCB data group lock enabled, the target core's TCB spinlock must be
 * held while uxPreemptionDisable is inspected. Without it the target core could
 * disable preemption in the window between this check and the arrival of the
 * cross-core yield interrupt. */
    #define prvYieldCore( xCoreID )                                                                    \
    do {                                                                                               \
        BaseType_t xCallerCoreID = ( BaseType_t ) portGET_CORE_ID();                                   \
        BaseType_t xTargetCoreID = ( xCoreID );                                                        \
        if( xTargetCoreID == xCallerCoreID )                                                           \
        {                                                                                              \
            /* This core is inside a critical section, so only pend the yield. */                      \
            xYieldPendings[ xTargetCoreID ] = pdTRUE;                                                  \
        }                                                                                              \
        else                                                                                           \
        {                                                                                              \
            /* Lock the target TCB so vTaskPreemptionDisable on the other core                         \
             * cannot race with this check. */                                                         \
            portGET_SPINLOCK( xCallerCoreID, &( pxCurrentTCBs[ xTargetCoreID ]->xTCBSpinlock ) );      \
            {                                                                                          \
                if( pxCurrentTCBs[ xTargetCoreID ]->uxPreemptionDisable == 0U )                        \
                {                                                                                      \
                    /* Interrupt the other core only if it has not already been                        \
                     * asked to yield. */                                                              \
                    if( pxCurrentTCBs[ xTargetCoreID ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
                    {                                                                                  \
                        portYIELD_CORE( xTargetCoreID );                                               \
                        pxCurrentTCBs[ xTargetCoreID ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD;   \
                    }                                                                                  \
                }                                                                                      \
                else                                                                                   \
                {                                                                                      \
                    /* Preemption is disabled on the target core - defer the yield. */                 \
                    xYieldPendings[ xTargetCoreID ] = pdTRUE;                                          \
                }                                                                                      \
            }                                                                                          \
            portRELEASE_SPINLOCK( xCallerCoreID, &( pxCurrentTCBs[ xTargetCoreID ]->xTCBSpinlock ) );  \
        }                                                                                              \
    } while( 0 )
390+
#elif ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
354391
#define prvYieldCore( xCoreID ) \
355392
do { \
356393
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
@@ -375,7 +412,7 @@
375412
} \
376413
} \
377414
} while( 0 )
378-
#else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
415+
#else /* if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
379416
#define prvYieldCore( xCoreID ) \
380417
do { \
381418
if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() ) \
@@ -393,7 +430,7 @@
393430
} \
394431
} \
395432
} while( 0 )
396-
#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
433+
#endif /* #if ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) */
397434
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
398435
/*-----------------------------------------------------------*/
399436

@@ -524,6 +561,10 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to
524561
* NULL when not using direct transfer */
525562
BaseType_t xDirectTransferPosition; /**< Position for direct transfer (queueSEND_TO_BACK, queueSEND_TO_FRONT, queueOVERWRITE) */
526563
#endif
564+
565+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
566+
portSPINLOCK_TYPE xTCBSpinlock; /**< Spinlock protecting TCB-specific data (uxPreemptionDisable, uxDeferredStateChange). */
567+
#endif
527568
} tskTCB;
528569

529570
/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
@@ -2173,6 +2214,12 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
21732214
}
21742215
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
21752216

2217+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
2218+
{
2219+
portINIT_SPINLOCK( &( pxNewTCB->xTCBSpinlock ) );
2220+
}
2221+
#endif
2222+
21762223
if( pxCreatedTask != NULL )
21772224
{
21782225
/* Pass the handle out in an anonymous way. The handle can be used to
@@ -3311,6 +3358,123 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33113358

33123359
/*-----------------------------------------------------------*/
33133360

3361+
#if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
3362+
3363+
static void prvTaskTCBLockCheckForRunStateChange( void )
3364+
{
3365+
const TCB_t * pxThisTCB;
3366+
BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3367+
3368+
/* This must only be called from within a task. */
3369+
portASSERT_IF_IN_ISR();
3370+
3371+
/* This function is always called with interrupts disabled
3372+
* so this is safe. */
3373+
pxThisTCB = pxCurrentTCBs[ xCoreID ];
3374+
3375+
while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
3376+
{
3377+
UBaseType_t uxPrevCriticalNesting;
3378+
3379+
/* We are only here if we just entered a critical section
3380+
* or if we just suspended the scheduler, and another task
3381+
* has requested that we yield.
3382+
*
3383+
* This is slightly complicated since we need to save and restore
3384+
* the suspension and critical nesting counts, as well as release
3385+
* and reacquire the correct locks. And then, do it all over again
3386+
* if our state changed again during the reacquisition. */
3387+
uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
3388+
3389+
if( uxPrevCriticalNesting > 0U )
3390+
{
3391+
portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
3392+
portRELEASE_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3393+
}
3394+
else
3395+
{
3396+
/* The scheduler is suspended. uxSchedulerSuspended is updated
3397+
* only when the task is not requested to yield. */
3398+
mtCOVERAGE_TEST_MARKER();
3399+
}
3400+
3401+
portMEMORY_BARRIER();
3402+
3403+
portENABLE_INTERRUPTS();
3404+
3405+
/* Enabling interrupts should cause this core to immediately service
3406+
* the pending interrupt and yield. After servicing the pending interrupt,
3407+
* the task needs to re-evaluate its run state within this loop, as
3408+
* other cores may have requested this task to yield, potentially altering
3409+
* its run state. */
3410+
3411+
portDISABLE_INTERRUPTS();
3412+
3413+
xCoreID = ( BaseType_t ) portGET_CORE_ID();
3414+
portGET_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3415+
3416+
portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
3417+
}
3418+
}
3419+
3420+
void vTaskTCBEnterCritical( void )
3421+
{
3422+
if( xSchedulerRunning != pdFALSE )
3423+
{
3424+
portDISABLE_INTERRUPTS();
3425+
{
3426+
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3427+
3428+
portGET_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3429+
3430+
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
3431+
3432+
if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
3433+
( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
3434+
{
3435+
prvTaskTCBLockCheckForRunStateChange();
3436+
}
3437+
}
3438+
}
3439+
}
3440+
3441+
void vTaskTCBExitCritical( void )
3442+
{
3443+
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
3444+
3445+
if( xSchedulerRunning != pdFALSE )
3446+
{
3447+
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
3448+
{
3449+
BaseType_t xYieldCurrentTask = pdFALSE;
3450+
3451+
/* Get the xYieldPending stats inside the critical section. */
3452+
if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
3453+
{
3454+
xYieldCurrentTask = xYieldPendings[ xCoreID ];
3455+
}
3456+
3457+
portRELEASE_SPINLOCK( xCoreID, &pxCurrentTCBs[ xCoreID ]->xTCBSpinlock );
3458+
3459+
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
3460+
3461+
/* If the critical nesting count is 0, enable interrupts */
3462+
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
3463+
{
3464+
portENABLE_INTERRUPTS();
3465+
3466+
if( xYieldCurrentTask != pdFALSE )
3467+
{
3468+
portYIELD();
3469+
}
3470+
}
3471+
}
3472+
}
3473+
}
3474+
3475+
#endif /* #if ( ( configUSE_TCB_DATA_GROUP_LOCK == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
3476+
/*-----------------------------------------------------------*/
3477+
33143478
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
33153479

33163480
void vTaskPreemptionDisable( const TaskHandle_t xTask )
@@ -3319,8 +3483,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33193483

33203484
traceENTER_vTaskPreemptionDisable( xTask );
33213485

3322-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3323-
vKernelLightWeightEnterCritical();
3486+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3487+
vTaskTCBEnterCritical();
33243488
#else
33253489
kernelENTER_CRITICAL();
33263490
#endif
@@ -3337,8 +3501,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33373501
mtCOVERAGE_TEST_MARKER();
33383502
}
33393503
}
3340-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3341-
vKernelLightWeightExitCritical();
3504+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3505+
vTaskTCBExitCritical();
33423506
#else
33433507
kernelEXIT_CRITICAL();
33443508
#endif
@@ -3356,15 +3520,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33563520
TCB_t * pxTCB;
33573521
UBaseType_t uxDeferredAction = 0U;
33583522
BaseType_t xAlreadyYielded = pdFALSE;
3523+
BaseType_t xTaskRequestedToYield = pdFALSE;
33593524

3360-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3361-
vKernelLightWeightEnterCritical();
3525+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3526+
vTaskTCBEnterCritical();
33623527
#else
33633528
kernelENTER_CRITICAL();
33643529
#endif
33653530
{
33663531
if( xSchedulerRunning != pdFALSE )
33673532
{
3533+
/* Current task running on the core can not be changed by other core.
3534+
* Get TCB from handle is safe to call within TCB critical section. */
33683535
pxTCB = prvGetTCBFromHandle( xTask );
33693536
configASSERT( pxTCB != NULL );
33703537
configASSERT( pxTCB->uxPreemptionDisable > 0U );
@@ -3381,8 +3548,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
33813548
{
33823549
if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
33833550
{
3384-
prvYieldCore( pxTCB->xTaskRunState );
3385-
xAlreadyYielded = pdTRUE;
3551+
xTaskRequestedToYield = pdTRUE;
33863552
}
33873553
else
33883554
{
@@ -3400,8 +3566,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
34003566
mtCOVERAGE_TEST_MARKER();
34013567
}
34023568
}
3403-
#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
3404-
vKernelLightWeightExitCritical();
3569+
#if ( configUSE_TCB_DATA_GROUP_LOCK == 1 )
3570+
vTaskTCBExitCritical();
34053571
#else
34063572
kernelEXIT_CRITICAL();
34073573
#endif
@@ -3424,6 +3590,27 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
34243590
/* Any deferred action on the task would result in a context switch. */
34253591
xAlreadyYielded = pdTRUE;
34263592
}
3593+
else
3594+
{
3595+
if( xTaskRequestedToYield != pdFALSE )
3596+
{
3597+
/* prvYieldCore must be called in critical section. */
3598+
kernelENTER_CRITICAL();
3599+
{
3600+
pxTCB = prvGetTCBFromHandle( xTask );
3601+
3602+
/* There is a gap between the TCB critical section and the kernel critical
 * section. Check the yield pending state again in case the current task has
 * already handled the yield request. */
3605+
if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
3606+
{
3607+
prvYieldCore( pxTCB->xTaskRunState );
3608+
}
3609+
}
3610+
kernelEXIT_CRITICAL();
3611+
xAlreadyYielded = pdTRUE;
3612+
}
3613+
}
34273614

34283615
return xAlreadyYielded;
34293616
}
@@ -7576,7 +7763,8 @@ static void prvResetNextTaskUnblockTime( void )
75767763
* interrupt. Only assert if the critical nesting count is 1 to
75777764
* protect against recursive calls if the assert function also uses a
75787765
* critical section. */
7579-
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
7766+
if( ( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) &&
7767+
( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) )
75807768
{
75817769
portASSERT_IF_IN_ISR();
75827770

0 commit comments

Comments
 (0)