xiaoxiang781216 commented on code in PR #14578:
URL: https://github.com/apache/nuttx/pull/14578#discussion_r1924172641


##########
include/nuttx/spinlock.h:
##########
@@ -499,6 +499,50 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 #  define spin_lock_irqsave(l) ((void)(l), up_irq_save())
 #endif
 
+/****************************************************************************
+ * Name: raw_spin_lock_irqsave

Review Comment:
   rename line 441 instead



##########
include/nuttx/spinlock.h:
##########
@@ -233,6 +233,8 @@ static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock)
 #ifdef CONFIG_SPINLOCK
 static inline_function void spin_lock(FAR volatile spinlock_t *lock)

Review Comment:
   We need to map spin_lock to sched_lock when !CONFIG_SPINLOCK, and
likewise for spin_trylock/spin_unlock; see the sketch below.
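   A minimal sketch of that fallback, assuming sched_lock()/sched_unlock()
become void as in this PR (illustrative, not the final implementation):

     #ifndef CONFIG_SPINLOCK
     #  define spin_lock(l)    ((void)(l), sched_lock())
     #  define spin_trylock(l) ((void)(l), sched_lock(), true)
     #  define spin_unlock(l)  ((void)(l), sched_unlock())
     #endif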



##########
include/nuttx/spinlock.h:
##########
@@ -499,6 +499,50 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 #  define spin_lock_irqsave(l) ((void)(l), up_irq_save())
 #endif
 
+/****************************************************************************
+ * Name: raw_spin_lock_irqsave
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     Disable local interrupts and take the lock spinlock and return
+ *     the interrupt state.
+ *
+ *     NOTE: This API provides simple protection for data (e.g. H/W
+ *     registers or internal data structures) in SMP mode.  Do not use
+ *     this API with kernel APIs that suspend the calling thread
+ *     (e.g. nxsem_wait).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_save().
+ *
+ * Input Parameters:
+ *   lock - Caller-specific spinlock.  Must not be NULL.
+ *
+ * Returned Value:
+ *   An opaque, architecture-specific value that represents the state of
+ *   the interrupts prior to the call to raw_spin_lock_irqsave(lock);
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SPINLOCK
+static inline_function
+irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags;
+
+  flags = up_irq_save();
+  raw_spin_lock(lock);
+
+  return flags;
+}
+#else
+static inline_function
+irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  return flags;
+}
+#endif
+
 /****************************************************************************
  * Name: spin_trylock_irqsave

Review Comment:
   add raw_spin_trylock_irqsave and let spin_trylock_irqsave call it
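   One possible shape, mirroring the statement-expression style of the
existing spin_trylock_irqsave() macro and assuming the PR's
raw_spin_trylock() counterpart (a sketch, not the final code):

     #ifdef CONFIG_SPINLOCK
     #  define raw_spin_trylock_irqsave(l, f) \
        ({ \
          (f) = up_irq_save(); \
          raw_spin_trylock(l) \
          ? true \
          : ({ up_irq_restore(f); false; }); \
        })
     #else
     #  define raw_spin_trylock_irqsave(l, f) \
        ((void)(l), (f) = up_irq_save(), true)
     #endif

   spin_trylock_irqsave() could then call this and layer the sched notes
on top.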



##########
include/nuttx/spinlock.h:
##########
@@ -595,6 +639,41 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
 #  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
 #endif
 
+/****************************************************************************
+ * Name: raw_spin_unlock_irqrestore
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     Release the lock and restore the interrupt state as it was prior
+ *     to the previous call to raw_spin_lock_irqsave(lock).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_restore().
+ *
+ * Input Parameters:
+ *   lock - Caller-specific spinlock.  Must not be NULL.
+ *
+ *   flags - The architecture-specific value that represents the state of
+ *           the interrupts prior to the call to raw_spin_lock_irqsave(lock);
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SPINLOCK
+static inline_function
+void raw_spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
+                                irqstate_t flags)
+{
+  raw_spin_unlock(lock);
+
+  up_irq_restore(flags);
+}
+#else
+#  define raw_spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))

Review Comment:
   Merge this into the previous patch.
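   For reference, a typical pairing of the two raw primitives might look
like this (illustrative snippet; priv and MY_REG are made-up names, not
from the PR):

     irqstate_t flags;

     flags = raw_spin_lock_irqsave(&priv->lock);
     putreg32(value, MY_REG);   /* protected H/W register access */
     raw_spin_unlock_irqrestore(&priv->lock, flags);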



##########
include/nuttx/irq.h:
##########
@@ -80,7 +80,7 @@
   do \
     { \
       g_cpu_irqset = 0; \

Review Comment:
   Can we use an atomic op and remove g_cpu_irqlock? If the answer is
yes, let's create a new PR to simplify it; a rough sketch of the idea is
below.
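   A rough sketch of the atomic idea (C11 stdatomic shown for clarity;
the atomic typing and the helper names here are assumptions, not current
code):

     #include <stdatomic.h>
     #include <stdint.h>

     static _Atomic uint32_t g_cpu_irqset;  /* bit n set: CPU n is in the
                                             * IRQ critical section */

     /* Set/clear one CPU's bit without holding g_cpu_irqlock */

     #define cpu_irqset_set(cpu)   atomic_fetch_or(&g_cpu_irqset, \
                                                   UINT32_C(1) << (cpu))
     #define cpu_irqset_clear(cpu) atomic_fetch_and(&g_cpu_irqset, \
                                                    ~(UINT32_C(1) << (cpu)))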



##########
include/sched.h:
##########
@@ -265,8 +265,10 @@ int    sched_cpucount(FAR const cpu_set_t *set);
 
 /* Task Switching Interfaces (non-standard) */
 
-int    sched_lock(void);
-int    sched_unlock(void);
+void   sched_lock_wo_note(void);

Review Comment:
   Remove the _wo_note variants since they have no callers now.
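   If the _wo_note variants go away, the declarations would presumably
reduce to:

     void   sched_lock(void);
     void   sched_unlock(void);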



##########
include/nuttx/spinlock.h:
##########
@@ -595,6 +639,41 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
 #  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
 #endif
 
+/****************************************************************************
+ * Name: raw_spin_unlock_irqrestore

Review Comment:
   rename line 592 directly



##########
include/nuttx/spinlock.h:
##########
@@ -1003,7 +1048,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 #ifdef CONFIG_SPINLOCK
 irqstate_t write_lock_irqsave(FAR rwlock_t *lock);

Review Comment:
   Do you need to modify the source file as well?



##########
include/nuttx/irq.h:
##########
@@ -75,16 +75,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_SMP
-#  define cpu_irqlock_clear() \

Review Comment:
   Are cpu_irqlock_clear and restore_critical_section called by arch
code? Let's keep them in include/nuttx/sched.h.


