xiaoxiang781216 commented on code in PR #10605:
URL: https://github.com/apache/nuttx/pull/10605#discussion_r1328039393
##########
sched/semaphore/spinlock.c:
##########
@@ -384,4 +384,176 @@ void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
 }
 #endif
+
+/****************************************************************************
+ * Name: fair_spinlock_list_init
+ *
+ * Description:
+ *   Initialize the waiter list of the fair spinlock.
+ *   If any spinlock node is queued on the list, that node will get the
+ *   lock.
+ *
+ * Input Parameters:
+ *   lock_list - A reference to the spinlock list.
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   None.
+ *
+ ****************************************************************************/
+
+void fair_spinlock_list_init(fair_spinlock_list_t *lock_list)
+{
+  lock_list->lock = SP_UNLOCKED;
+  list_initialize(&lock_list->list);
+}
+
+/****************************************************************************
+ * Name: fair_spinlock_init
+ *
+ * Description:
+ *   Initialize the fair spinlock.  The lock state is set to SP_LOCKED
+ *   until someone hands the lock to the holder.
+ *
+ * Input Parameters:
+ *   lock - A reference to the spinlock object to lock.
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+void fair_spinlock_init(fair_spinlock_t *lock)
+{
+  lock->lock = SP_LOCKED;
+  lock->holder = this_task();
+  list_initialize(&lock->node);
+}
+
+/****************************************************************************
+ * Name: fair_spin_lock
+ *
+ * Description:
+ *   Try to take the spinlock and insert this lock node into the lock
+ *   list.  If the list is empty, the caller gets the lock immediately.
+ *   If the list is not empty, the node is queued on the list and the
+ *   caller spins until the lock is handed over.
+ *
+ * Input Parameters:
+ *   lock_list - A reference to the fair lock list that this node is
+ *               inserted into.
+ *   fair_lock - A reference to the spinlock object to lock.
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   Not running at the interrupt level.
+ *
+ ****************************************************************************/
+
+void fair_spin_lock(FAR fair_spinlock_list_t *lock_list,
+                    FAR fair_spinlock_t *fair_lock)
+{
+  FAR struct tcb_s *rtcb = this_task();
+  fair_spinlock_t *entry;
+  fair_spinlock_t *temp;
+
+#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
+  /* Notify that we have the spinlock */
+
+  sched_note_spinlock(this_task(), &fair_lock->lock, NOTE_SPINLOCK_LOCKED);
+#endif
+
+  /* Serialize access to the waiter list */
+
+  spin_lock(&lock_list->lock);
+
+  /* No node holds this lock: take it immediately */
+
+  if (list_is_empty(&lock_list->list))
+    {
+      fair_lock->lock = SP_UNLOCKED;
+      list_add_head(&lock_list->list, &fair_lock->node);
+    }
+
+  /* A task already holds this lock: queue in priority order */
+
+  else
+    {
+      /* Priority-ordered linked list */
+
+      list_for_every_entry_safe(&lock_list->list, entry, temp,
+                                struct fair_spinlock_s, node)
+        {
+          if (entry->holder->sched_priority > rtcb->sched_priority)
+            {
+              break;
+            }
+        }
+
+      list_add_before(&entry->node, &fair_lock->node);
+    }
+
+  spin_unlock(&lock_list->lock);
+
+  while (up_testset(&fair_lock->lock) == SP_LOCKED)

Review Comment:
   If you want to use the MCS algorithm to reduce cache contention, a
   per-CPU structure is required: https://lwn.net/Articles/590243/
   But since NuttX's SMP is designed for a small number of CPU cores
   (2~4), a ticket spinlock is enough here. If NuttX wants to support
   10+ or even 100+ cores, many parts of the design must change, for
   example:
   1. The one big critical section needs to be removed.
   2. The single timer interrupt that drives scheduling on all cores
      needs to change to a per-CPU timer.
   and more.
   Before the core design is fixed, I don't see the benefit of using an
   MCS lock or qspinlocks.
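For reference, here is a minimal sketch of the ticket spinlock mentioned
above, assuming C11 atomics are available; the names ticket_lock_t,
ticket_lock_init, ticket_lock, and ticket_unlock are illustrative only
and are not part of the NuttX API:

#include <stdatomic.h>

typedef struct
{
  atomic_uint next;   /* Next ticket to hand out */
  atomic_uint owner;  /* Ticket currently being served */
} ticket_lock_t;

static inline void ticket_lock_init(ticket_lock_t *lock)
{
  atomic_init(&lock->next, 0);
  atomic_init(&lock->owner, 0);
}

static inline void ticket_lock(ticket_lock_t *lock)
{
  /* Take a ticket; strict FIFO order is what makes the lock fair. */

  unsigned int ticket = atomic_fetch_add(&lock->next, 1);

  /* Spin until our ticket is being served. */

  while (atomic_load(&lock->owner) != ticket)
    {
    }
}

static inline void ticket_unlock(ticket_lock_t *lock)
{
  /* Pass the lock to the next ticket in line. */

  atomic_fetch_add(&lock->owner, 1);
}

Note that every waiter polls the same owner word, so all contending CPUs
bounce one cache line; that is exactly the cost the MCS design avoids,
and with 2~4 cores it is usually negligible.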
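For comparison, a minimal sketch of the MCS queue lock described in the
LWN article, again assuming C11 atomics; the per-CPU (or per-caller)
struct mcs_node is exactly the "per CPU structure" requirement mentioned
above, and all names here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node
{
  _Atomic(struct mcs_node *) next;   /* Next waiter in the queue */
  atomic_bool locked;                /* True while this waiter must spin */
};

typedef _Atomic(struct mcs_node *) mcs_lock_t;  /* Tail of the queue */

static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
  struct mcs_node *prev;

  atomic_store(&node->next, NULL);
  atomic_store(&node->locked, true);

  /* Atomically make our node the new tail of the queue. */

  prev = atomic_exchange(lock, node);
  if (prev != NULL)
    {
      /* Queue was not empty: link behind the old tail and spin on our
       * own node only, so each waiter polls a different cache line.
       */

      atomic_store(&prev->next, node);
      while (atomic_load(&node->locked))
        {
        }
    }
}

static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
  struct mcs_node *succ = atomic_load(&node->next);

  if (succ == NULL)
    {
      /* No visible successor: try to swing the tail back to empty. */

      struct mcs_node *expected = node;
      if (atomic_compare_exchange_strong(lock, &expected, NULL))
        {
          return;
        }

      /* A successor is mid-enqueue; wait for it to link itself in. */

      while ((succ = atomic_load(&node->next)) == NULL)
        {
        }
    }

  /* Hand the lock directly to the successor. */

  atomic_store(&succ->locked, false);
}

Each waiter spins only on its own node, which removes the cache-line
bouncing, but every caller must supply a queue node, which in practice
means per-CPU storage; that is why it brings little benefit until the
core SMP design supports many cores.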
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org