Skip to content

Commit 7417c83

Browse files
committed
cortex-r82: Minor code improvements
This commit includes minor code improvements to enhance the readability and maintainability of the Cortex-R82 port files. Changes include refactoring variable names, optimizing comments, and improving code structure without altering functionality.

Signed-off-by: Ahmed Ismail <Ahmed.Ismail@arm.com>
1 parent a9b9085 commit 7417c83

3 files changed

Lines changed: 155 additions & 129 deletions

File tree

portable/GCC/ARM_CR82/port.c

Lines changed: 84 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,6 @@
229229
* assembly code so is implemented in portASM.s.
230230
*/
231231
extern void vPortRestoreTaskContext( void );
232-
233232
extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
234233
extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
235234
extern void vGIC_PowerUpRedistributor( void );
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
238237
/*-----------------------------------------------------------*/
239238

240239
#if ( configNUMBER_OF_CORES == 1 )
240+
241241
PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;
242242

243-
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
244-
* then floating point context must be saved and restored for the task. */
243+
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
244+
* then floating point context must be saved and restored for the task. */
245245
PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;
246246

247-
/* Set to 1 to pend a context switch from an ISR. */
247+
/* Set to 1 to pend a context switch from an ISR. */
248248
PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;
249249

250-
/* Counts the interrupt nesting depth. A context switch is only performed if
251-
* if the nesting depth is 0. */
250+
/* Counts the interrupt nesting depth. A context switch is only performed if
251+
 * the nesting depth is 0. */
252252
PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;
253+
253254
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
254255
PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
255256

256257
/* Flags to check if the secondary cores are ready. */
257258
PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };
259+
260+
/* Flag to signal that the primary core has done all the shared initialisations. */
258261
PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;
259-
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
262+
263+
/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
260264
* then floating point context must be saved and restored for the task. */
261265
PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };
266+
267+
/* Set to 1 to pend a context switch from an ISR. */
262268
PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };
269+
270+
/* Counts the interrupt nesting depth. A context switch is only performed if
271+
 * the nesting depth is 0. */
263272
PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };
264273

265274
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -1168,12 +1177,12 @@ BaseType_t xPortStartScheduler( void )
11681177
volatile uint8_t ucMaxPriorityValue;
11691178

11701179
/* Determine how many priority bits are implemented in the GIC.
1171-
*
1172-
* Save the interrupt priority value that is about to be clobbered. */
1180+
*
1181+
* Save the interrupt priority value that is about to be clobbered. */
11731182
ucOriginalPriority = *pucFirstUserPriorityRegister;
11741183

11751184
/* Determine the number of priority bits available. First write to
1176-
* all possible bits. */
1185+
* all possible bits. */
11771186
*pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
11781187

11791188
/* Read the value back to see how many bits stuck. */
@@ -1186,12 +1195,12 @@ BaseType_t xPortStartScheduler( void )
11861195
}
11871196

11881197
/* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
1189-
* value. */
1198+
* value. */
11901199
configASSERT( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
11911200

11921201

11931202
/* Restore the clobbered interrupt priority register to its original
1194-
* value. */
1203+
* value. */
11951204
*pucFirstUserPriorityRegister = ucOriginalPriority;
11961205
}
11971206
#endif /* configASSERT_DEFINED */
@@ -1229,9 +1238,9 @@ BaseType_t xPortStartScheduler( void )
12291238
while( 1 )
12301239
{
12311240
BaseType_t xAllCoresReady = pdTRUE;
1232-
for( uint32_t ulCoreID = 0; ulCoreID < ( configNUMBER_OF_CORES - 1 ); ulCoreID++ )
1241+
for( uint8_t ucCoreID = 0; ucCoreID < ( configNUMBER_OF_CORES - 1 ); ucCoreID++ )
12331242
{
1234-
if( ucSecondaryCoresReadyFlags[ ulCoreID ] != pdTRUE )
1243+
if( ucSecondaryCoresReadyFlags[ ucCoreID ] != pdTRUE )
12351244
{
12361245
xAllCoresReady = pdFALSE;
12371246
break;
@@ -1530,16 +1539,16 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15301539
#if ( configNUMBER_OF_CORES > 1 )
15311540

15321541
/* Which core owns the lock? Keep in privileged, shareable RAM. */
1533-
PRIVILEGED_DATA volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
1542+
PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];
15341543
/* Lock count a core owns. */
1535-
PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock[ eLockCount ];
1544+
PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];
15361545
/* Index 0 is used for ISR lock and Index 1 is used for task lock. */
15371546
PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
15381547

1539-
void vInterruptCore( uint32_t ulInterruptID, uint32_t ulCoreID )
1548+
void vInterruptCore( uint32_t ulInterruptID, uint8_t ucCoreID )
15401549
{
15411550
uint64_t ulRegVal = 0;
1542-
uint32_t ulCoreMask = ( 1UL << ulCoreID );
1551+
uint32_t ulCoreMask = ( 1UL << ucCoreID );
15431552
ulRegVal |= ( (ulCoreMask & 0xFFFF) | ( ( ulInterruptID & 0xF ) << 24U ) );
15441553
__asm volatile (
15451554
"str x0, [ sp, #-0x10 ]! \n"
@@ -1556,13 +1565,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15561565

15571566
static inline void prvSpinUnlock( uint32_t * ulLock )
15581567
{
1568+
/* Conservative unlock: preserve original barriers for broad HW/FVP. */
15591569
__asm volatile (
1560-
"dmb sy\n"
1561-
"mov w1, #0\n"
1562-
"str w1, [%x0]\n"
1563-
"sev\n"
1564-
"dsb sy\n"
1565-
"isb sy\n"
1570+
"dmb sy \n"
1571+
"mov w1, #0 \n"
1572+
"str w1, [%x0] \n"
1573+
"sev \n"
1574+
"dsb sy \n"
1575+
"isb sy \n"
15661576
:
15671577
: "r" ( ulLock )
15681578
: "memory", "w1"
@@ -1573,22 +1583,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15731583

15741584
static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
15751585
{
1586+
/*
1587+
* Conservative LDXR/STXR trylock:
1588+
* - Return 1 immediately if busy, clearing exclusive state (CLREX).
1589+
* - Retry STXR only on spurious failure when observed free.
1590+
* - DMB on success to preserve expected acquire semantics.
1591+
*/
15761592
register uint32_t ulRet;
1577-
/* Try to acquire spinlock; caller is responsible for further barriers. */
15781593
__asm volatile (
1579-
"1:\n"
1580-
"ldxr w1, [%x1]\n"
1581-
"cmp w1, #1\n"
1582-
"beq 2f\n"
1583-
"mov w2, #1\n"
1584-
"stxr w1, w2, [%x1]\n"
1585-
"cmp w1, #0\n"
1586-
"bne 1b\n"
1587-
"2:\n"
1588-
"mov %w0, w1\n"
1594+
"1: \n"
1595+
"ldxr w1, [%x1] \n"
1596+
"cbnz w1, 2f \n" /* Busy -> return 1 */
1597+
"mov w2, #1 \n"
1598+
"stxr w3, w2, [%x1] \n" /* w3 = status */
1599+
"cbnz w3, 1b \n" /* Retry on STXR failure */
1600+
"dmb sy \n" /* Acquire barrier on success */
1601+
"mov %w0, #0 \n" /* Success */
1602+
"b 3f \n"
1603+
"2: \n"
1604+
"clrex \n" /* Clear monitor when busy */
1605+
"mov %w0, #1 \n" /* Busy */
1606+
"3: \n"
15891607
: "=r" ( ulRet )
15901608
: "r" ( ulLock )
1591-
: "memory", "w1", "w2"
1609+
: "memory", "w1", "w2", "w3"
15921610
);
15931611

15941612
return ulRet;
@@ -1615,12 +1633,12 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16151633

16161634
/*-----------------------------------------------------------*/
16171635

1618-
void vPortRecursiveLock( BaseType_t xCoreID,
1636+
void vPortRecursiveLock( uint8_t ucCoreID,
16191637
ePortRTOSLock eLockNum,
16201638
BaseType_t uxAcquire )
16211639
{
16221640
/* Validate the core ID and lock number. */
1623-
configASSERT( xCoreID < portMAX_CORE_COUNT );
1641+
configASSERT( ucCoreID < portMAX_CORE_COUNT );
16241642
configASSERT( eLockNum < eLockCount );
16251643

16261644
uint32_t ulLockBit = 1u << eLockNum;
@@ -1636,10 +1654,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16361654
if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
16371655
{
16381656
/* Check if the core owns the spinlock. */
1639-
if( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit )
1657+
if( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ulLockBit )
16401658
{
1641-
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 255u );
1642-
prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) + 1 ) );
1659+
configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 255u );
1660+
prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) + 1 ) );
16431661
return;
16441662
}
16451663

@@ -1649,40 +1667,48 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16491667

16501668
while( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
16511669
{
1652-
/* Follow Arm's recommended way of sleeping
1653-
* sevl is used to prime the wait loop,
1654-
* the first wfe wakes immediately as sevl has set the flag
1655-
* the second wfe sleeps the core. This way the core is ensured
1656-
* to sleep.
1657-
*/
1658-
__asm volatile ( "sevl; wfe; wfe" );
1670+
/* Follow Arm's recommended way of sleeping
1671+
* sevl is used to prime the wait loop.
1672+
* The first wfe wakes immediately because sevl has set the flag.
1673+
* Check the lock, if it's not available, issue a second wfe to sleep.
1674+
* This guarantees the core actually goes to sleep.
1675+
*/
1676+
__asm volatile (
1677+
"sevl \n"
1678+
"1: wfe \n"
1679+
"ldr w2, [%x0] \n"
1680+
"cbnz w2, 1b \n"
1681+
:
1682+
: "r" ( &ulGateWord[ eLockNum ] )
1683+
: "memory", "w2"
1684+
);
16591685
}
16601686
}
16611687

16621688
/* Add barrier to ensure lock is taken before we proceed. */
16631689
__asm__ __volatile__ ( "dmb sy" ::: "memory" );
16641690

16651691
/* Assert the lock count is 0 when the spinlock is free and is acquired. */
1666-
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) == 0 );
1692+
configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) == 0 );
16671693

16681694
/* Set lock count as 1. */
1669-
prvSet64( &ucRecursionCountByLock[ eLockNum ], 1 );
1670-
/* Set ucOwnedByCore. */
1671-
prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) | ulLockBit ) );
1695+
prvSet64( &ullRecursionCountByLock[ eLockNum ], 1 );
1696+
/* Set ullOwnedByCore. */
1697+
prvSet64( &ullOwnedByCore[ ucCoreID ], ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) | ulLockBit ) );
16721698
}
16731699
/* Lock release. */
16741700
else
16751701
{
16761702
/* Assert the lock is not free already. */
1677-
configASSERT( ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 );
1678-
configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 0 );
1703+
configASSERT( ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ulLockBit ) != 0 );
1704+
configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 0 );
16791705

1680-
/* Reduce ucRecursionCountByLock by 1. */
1681-
prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) - 1 ) );
1706+
/* Reduce ullRecursionCountByLock by 1. */
1707+
prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) - 1 ) );
16821708

1683-
if( !prvGet64( &ucRecursionCountByLock[ eLockNum ] ) )
1709+
if( !prvGet64( &ullRecursionCountByLock[ eLockNum ] ) )
16841710
{
1685-
prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ~ulLockBit ) );
1711+
prvSet64( &ullOwnedByCore[ ucCoreID ], ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ~ulLockBit ) );
16861712
prvSpinUnlock( &ulGateWord[ eLockNum ] );
16871713
/* Add barrier to ensure lock status is reflected before we proceed. */
16881714
__asm__ __volatile__ ( "dmb sy" ::: "memory" );

0 commit comments

Comments
 (0)