xiaoxiang781216 commented on code in PR #10776:
URL: https://github.com/apache/nuttx/pull/10776#discussion_r1354144604
##########
sched/irq/irq_spinlock.c:
##########

@@ -45,6 +45,17 @@ static volatile spinlock_t g_irq_spin = SP_UNLOCKED;
 static volatile uint8_t g_irq_spin_count[CONFIG_SMP_NCPUS];
 
+#ifdef CONFIG_RW_SPINLOCK
+/* Used for access control */
+
+static volatile rwlock_t g_irq_rw_spin = RW_SP_UNLOCKED;
+
+/* Handles nested calls to write_lock_irqsave and write_unlock_irqrestore */
+
+static volatile uint8_t g_irq_write_spin_count[CONFIG_SMP_NCPUS];

Review Comment:

```suggestion
static volatile uint8_t g_irq_rw_spin_count[CONFIG_SMP_NCPUS];
```

##########
sched/semaphore/spinlock.c:
##########

@@ -446,4 +451,222 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
 }
 #endif
 
+#ifdef CONFIG_RW_SPINLOCK
+
+/****************************************************************************
+ * Name: read_lock
+ *
+ * Description:
+ *   If this task does not already hold the spinlock, then loop until the
+ *   spinlock is successfully locked.
+ *
+ *   This implementation is non-reentrant and adds one reader to the lock
+ *   count.
+ *
+ *   Readers have priority over writers: while a reader holds the lock,
+ *   new readers can still acquire it, but a writer cannot.
+ *
+ * Input Parameters:
+ *   lock - A reference to the spinlock object to lock.
+ *
+ * Returned Value:
+ *   None.  When the function returns, the spinlock was successfully locked
+ *   by this CPU.
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+void read_lock(FAR volatile rwlock_t *lock)
+{
+  rwlock_t old;
+
+  while (true)
+    {
+      old = atomic_load(lock);
+
+      if (old >= RW_SP_UNLOCKED &&
+          atomic_compare_exchange_strong(lock, &old, old + 1))
+        {
+          break;
+        }
+      else if (old < 0)
+        {
+          SP_DSB();
+          SP_WFE();
+        }
+    }
+
+  SP_DMB();
+}
+
+/****************************************************************************
+ * Name: read_trylock
+ *
+ * Description:
+ *   If this task does not already hold the spinlock, then try to get the
+ *   lock.
+ *
+ *   This implementation is non-reentrant and adds one reader to the lock
+ *   count.
+ *
+ *   Readers have priority over writers: while a reader holds the lock,
+ *   new readers can still acquire it, but a writer cannot.
+ *
+ * Input Parameters:
+ *   lock - A reference to the spinlock object to lock.
+ *
+ * Returned Value:
+ *   false - Failure, the spinlock could not be taken
+ *   true  - Success, the spinlock was successfully locked
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+bool read_trylock(FAR volatile rwlock_t *lock)
+{
+  rwlock_t old = atomic_load(lock);
+
+  if (old >= RW_SP_UNLOCKED &&
+      atomic_compare_exchange_strong(lock, &old, old + 1))
+    {
+      SP_DMB();
+      return true;
+    }
+
+  return false;
+}
+
+/****************************************************************************
+ * Name: read_unlock
+ *
+ * Description:
+ *   Release one reader's hold on a non-reentrant read/write spinlock.
+ *
+ * Input Parameters:
+ *   lock - A reference to the spinlock object to unlock.
+ *
+ * Returned Value:
+ *   None.
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+void read_unlock(FAR volatile rwlock_t *lock)
+{
+  DEBUGASSERT(atomic_load(lock) >= RW_SP_READ_LOCKED);
+
+  SP_DMB();
+  atomic_fetch_add(lock, -1);
+  SP_DSB();
+  SP_SEV();
+}
+
+/****************************************************************************
+ * Name: write_lock
+ *
+ * Description:
+ *   If this task does not already hold the spinlock, then loop until the
+ *   spinlock is successfully locked.
+ *
+ *   This implementation is non-reentrant and writes the write-locked
+ *   value into the lock, excluding both readers and other writers.
+ *
+ *   Readers have priority over writers: while a reader holds the lock,
+ *   new readers can still acquire it, but a writer cannot.
+ *
+ * Input Parameters:
+ *   lock - A reference to the spinlock object to lock.
+ *
+ * Returned Value:
+ *   None.  When the function returns, the spinlock was successfully locked
+ *   by this CPU.
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+void write_lock(FAR volatile rwlock_t *lock)
+{
+  while (!atomic_compare_exchange_strong(lock, &unlocked, \
+                      RW_SP_WRITE_LOCKED))

Review Comment:
align
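An aside for readers following this thread (not part of the PR diff): a minimal sketch of how the reader side of the API quoted above is meant to be used, assuming an `rwlock_t` can be statically initialized to `RW_SP_UNLOCKED`; the statistics variable and function names are hypothetical.

```c
#include <nuttx/spinlock.h>

/* Hypothetical reader-side usage of the rwlock API quoted above.  Many
 * readers may hold the lock at once; each successful read_lock() or
 * read_trylock() must be paired with exactly one read_unlock().
 */

static rwlock_t g_stats_lock = RW_SP_UNLOCKED; /* Assumed initializer */
static unsigned long g_rx_packets;             /* The data being protected */

unsigned long stats_get_rx(void)
{
  unsigned long val;

  read_lock(&g_stats_lock);   /* Spins only while a writer holds the lock */
  val = g_rx_packets;
  read_unlock(&g_stats_lock); /* Drops this reader's count */

  return val;
}

bool stats_try_get_rx(FAR unsigned long *val)
{
  if (!read_trylock(&g_stats_lock)) /* Fails instead of spinning */
    {
      return false;
    }

  *val = g_rx_packets;
  read_unlock(&g_stats_lock);
  return true;
}
```

Note the trade-off the doc comments describe: because readers only increment a counter while the lock value is non-negative, read sections overlap freely, but a continuous stream of readers can keep the value positive indefinitely and starve `write_lock()`.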
##########
sched/semaphore/spinlock.c:
##########

@@ -36,6 +36,11 @@
 # include <stdatomic.h>
 #endif
 
+#ifdef CONFIG_RW_SPINLOCK
+# include <stdatomic.h>
+const static rwlock_t unlocked = RW_SP_UNLOCKED;

Review Comment:
But it's still better to define a local variable instead of a global.
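Beyond style, the suggestion matters for correctness: when the comparison fails, C11 `atomic_compare_exchange_strong()` writes the lock's observed value back through its second (`expected`) argument, so passing a shared, `const`-qualified global there is undefined behavior and would also corrupt the expected value seen by every other caller. A sketch of `write_lock()` with a per-call local, assuming the same `SP_DSB`/`SP_WFE`/`SP_DMB` barrier macros used elsewhere in this file:

```c
/* Sketch of write_lock() with a per-call local, per the review comment.
 * On failure, atomic_compare_exchange_strong() stores the value it
 * actually observed into 'expected', so 'expected' must be writable and
 * must be reset to RW_SP_UNLOCKED before every retry -- a shared global
 * (let alone a const one) satisfies neither requirement.
 */

void write_lock(FAR volatile rwlock_t *lock)
{
  rwlock_t expected = RW_SP_UNLOCKED;

  while (!atomic_compare_exchange_strong(lock, &expected,
                                         RW_SP_WRITE_LOCKED))
    {
      expected = RW_SP_UNLOCKED; /* CAS overwrote it with the lock value */
      SP_DSB();
      SP_WFE();                  /* Wait while the lock is held */
    }

  SP_DMB();
}
```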
##########
sched/irq/irq_spinlock.c:
##########

@@ -209,4 +220,202 @@ void spin_unlock_irqrestore_wo_note(spinlock_t *lock, irqstate_t flags)
   up_irq_restore(flags);
 }
 
+#ifdef CONFIG_RW_SPINLOCK
+
+/****************************************************************************
+ * Name: read_lock_irqsave
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     If the argument lock is not specified (i.e. NULL), disable local
+ *     interrupts and take the global read/write spinlock (g_irq_rw_spin)
+ *     by incrementing its reader count.
+ *
+ *     If the argument lock is specified, disable local interrupts, take
+ *     the given spinlock, and return the interrupt state.
+ *
+ *     NOTE: This API is very simple, intended to protect data (e.g. H/W
+ *     registers or an internal data structure) in SMP mode.  But do not
+ *     use this API together with kernel APIs which suspend the caller
+ *     thread (e.g. nxsem_wait).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_save().
+ *
+ * Input Parameters:
+ *   lock - Caller specific spinlock.  If NULL is specified, g_irq_rw_spin
+ *          is used and the call can be nested.  Otherwise, a nested call
+ *          on the same lock would cause a deadlock.
+ *
+ * Returned Value:
+ *   An opaque, architecture-specific value that represents the state of
+ *   the interrupts prior to the call to read_lock_irqsave(lock);
+ *
+ ****************************************************************************/
+
+irqstate_t read_lock_irqsave(FAR rwlock_t *lock)
+{
+  irqstate_t ret;
+  ret = up_irq_save();
+
+  if (NULL == lock)
+    {
+      read_lock(&g_irq_rw_spin);
+    }
+  else
+    {
+      read_lock(lock);
+    }
+
+  return ret;
+}
+
+/****************************************************************************
+ * Name: read_unlock_irqrestore
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     If the argument lock is not specified (i.e. NULL), decrement the
+ *     reader count of g_irq_rw_spin and restore the interrupt state as it
+ *     was prior to the previous call to read_lock_irqsave(NULL).
+ *
+ *     If the argument lock is specified, release the lock and restore
+ *     the interrupt state as it was prior to the previous call to
+ *     read_lock_irqsave(lock).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_restore().
+ *
+ * Input Parameters:
+ *   lock  - Caller specific spinlock.  If NULL is specified,
+ *           g_irq_rw_spin is used.
+ *
+ *   flags - The architecture-specific value that represents the state of
+ *           the interrupts prior to the call to read_lock_irqsave(lock);
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void read_unlock_irqrestore(rwlock_t *lock, irqstate_t flags)
+{
+  if (NULL == lock)
+    {
+      read_unlock(&g_irq_rw_spin);
+    }
+  else
+    {
+      read_unlock(lock);
+    }
+
+  up_irq_restore(flags);
+}
+
+/****************************************************************************
+ * Name: write_lock_irqsave
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     If the argument lock is not specified (i.e. NULL), disable local
+ *     interrupts and take the global spinlock (g_irq_rw_spin) if the call
+ *     counter (g_irq_write_spin_count[cpu]) equals 0.  The counter on the
+ *     CPU is then incremented to allow nested calls, and the interrupt
+ *     state is returned.
+ *
+ *     If the argument lock is specified, disable local interrupts, take
+ *     the given spinlock, and return the interrupt state.
+ *
+ *     NOTE: This API is very simple, intended to protect data (e.g. H/W
+ *     registers or an internal data structure) in SMP mode.  But do not
+ *     use this API together with kernel APIs which suspend the caller
+ *     thread (e.g. nxsem_wait).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_save().
+ *
+ * Input Parameters:
+ *   lock - Caller specific spinlock.  If NULL is specified, g_irq_rw_spin
+ *          is used and the call can be nested.  Otherwise, a nested call
+ *          on the same lock would cause a deadlock.
+ *
+ * Returned Value:
+ *   An opaque, architecture-specific value that represents the state of
+ *   the interrupts prior to the call to write_lock_irqsave(lock);
+ *
+ ****************************************************************************/
+
+irqstate_t write_lock_irqsave(rwlock_t *lock)
+{
+  irqstate_t ret;
+  ret = up_irq_save();
+
+  if (NULL == lock)
+    {
+      int me = this_cpu();
+      if (0 == g_irq_write_spin_count[me])

Review Comment:
Why is g_irq_write_spin_count needed for the write lock?
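On the counter question, a reconstruction from the quoted doc comment (the PR's actual code below the quoted lines is not shown here): the per-CPU counter is what makes `write_lock_irqsave(NULL)` nestable. Only the outermost call on a CPU takes `g_irq_rw_spin`; inner calls just increase the depth, which the matching `write_unlock_irqrestore(NULL)` calls unwind.

```c
/* Hypothetical completion of write_lock_irqsave(), inferred from the
 * doc comment above.  Interrupts are already disabled by up_irq_save(),
 * so the per-CPU counter cannot race with other code on the same CPU.
 */

irqstate_t write_lock_irqsave(rwlock_t *lock)
{
  irqstate_t ret;
  ret = up_irq_save();

  if (NULL == lock)
    {
      int me = this_cpu();

      if (0 == g_irq_write_spin_count[me])
        {
          write_lock(&g_irq_rw_spin); /* Outermost call takes the lock */
        }

      g_irq_write_spin_count[me]++;   /* Count one level of nesting */
    }
  else
    {
      write_lock(lock); /* Caller's lock: nesting would deadlock */
    }

  return ret;
}
```

Readers, by contrast, need no such counter: nested `read_lock_irqsave(NULL)` calls are naturally reentrant because each one simply increments the reader count.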
##########
sched/semaphore/spinlock.c:
##########

@@ -446,4 +451,222 @@
+void write_lock(FAR volatile rwlock_t *lock)
+{
+  while (!atomic_compare_exchange_strong(lock, &unlocked, \

Review Comment:
remove \

##########
include/nuttx/spinlock.h:
##########

@@ -32,6 +32,14 @@
 
 #include <nuttx/irq.h>
 
+#ifdef CONFIG_RW_SPINLOCK
+#include <stdatomic.h>
+typedef atomic_int rwlock_t;

Review Comment:
Can we use a normal type to avoid including stdatomic.h in the header file?
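On that last question, one plausible direction, shown only as an illustration (not necessarily what the PR ended up doing): keep a plain integer in the public header and confine the atomic accesses to the implementation file, e.g. via the GCC/Clang `__atomic` builtins, which operate on ordinary integers. The `RW_SP_*` values below are inferred from the checks in the quoted code (unlocked == 0, readers count upward, write-locked < 0):

```c
/* include/nuttx/spinlock.h -- sketch: no <stdatomic.h> needed here */

typedef int rwlock_t;

#define RW_SP_UNLOCKED      0    /* Inferred: no holders */
#define RW_SP_READ_LOCKED   1    /* Inferred: one reader */
#define RW_SP_WRITE_LOCKED  (-1) /* Inferred: write-locked */

/* sched/semaphore/spinlock.c -- sketch: the atomics stay internal to the
 * implementation, using compiler builtins on the plain integer object.
 */

bool read_trylock(FAR volatile rwlock_t *lock)
{
  int old = __atomic_load_n(lock, __ATOMIC_RELAXED);

  return old >= RW_SP_UNLOCKED &&
         __atomic_compare_exchange_n(lock, &old, old + 1, false,
                                     __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
```

This keeps `stdatomic.h` out of every translation unit that includes the spinlock header, at the cost of tying the implementation to compiler builtins rather than the C11 atomic types.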