jlaitine commented on code in PR #16673:
URL: https://github.com/apache/nuttx/pull/16673#discussion_r2196997364


##########
sched/sched/sched_mergepending.c:
##########
@@ -186,100 +186,97 @@ bool nxsched_merge_pending(void)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
+
 bool nxsched_merge_pending(void)
 {
   FAR struct tcb_s *rtcb;
   FAR struct tcb_s *ptcb;
-  FAR struct tcb_s *tcb;
+  FAR struct tcb_s *next;
   bool ret = false;
   int cpu;
+  int minprio;
 
-  /* Remove and process every TCB in the g_pendingtasks list.
-   *
-   * Do nothing if (1) pre-emption is still disabled (by any CPU), or (2) if
-   * some CPU other than this one is in a critical section.
-   */
+  ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun());
+  if (ptcb == NULL)
+    {
+      /* The readytorun task list is empty. */
+
+      return false;
+    }
+
+  /* Find the CPU that is executing the lowest priority task. */
 
-  if (!nxsched_islocked_tcb(this_task()))
+  cpu = nxsched_select_cpu(ALL_CPUS);
+  if (cpu >= CONFIG_SMP_NCPUS)
     {
-      /* Find the CPU that is executing the lowest priority task */
+      /* There are no available CPUs. This can only happen if
+       * all the CPUs are running scheduler locked
+       */
 
-      ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks());
-      if (ptcb == NULL)
-        {
-          /* The pending task list is empty */
+      return false;
+    }
 
-          return false;
-        }
+  rtcb = current_delivered(cpu);
+  minprio = rtcb->sched_priority;
 
-      cpu  = nxsched_select_cpu(ALL_CPUS); /* REVISIT:  Maybe ptcb->affinity */
-      rtcb = current_task(cpu);
+  /* Loop while there is a higher priority task in the ready-to-run list
+   * than in the lowest executing task.
+   *
+   * Normally, this loop should execute no more than CONFIG_SMP_NCPUS
+   * times.  That number could be larger, however, if the CPU affinity
+   * sets do not include all CPUs. In that case, the excess TCBs will
+   * be left in the g_readytorun list.
+   */
 
-      /* Loop while there is a higher priority task in the pending task list
-       * than in the lowest executing task.
-       *
-       * Normally, this loop should execute no more than CONFIG_SMP_NCPUS
-       * times.  That number could be larger, however, if the CPU affinity
-       * sets do not include all CPUs. In that case, the excess TCBs will
-       * end up in the g_readytorun list.
+  while (ptcb && ptcb->sched_priority > minprio)
+    {
+      /* If the ptcb is not allowed to run on this CPU, re-select the
+       * CPU. This is unlikely, so re-select is not worth doing on
+       * every cycle.
        */
 
-      while (ptcb->sched_priority > rtcb->sched_priority)
+      if (((1 << cpu) & ptcb->affinity) == 0)
         {
-          /* Remove the task from the pending task list */
-
-          tcb = (FAR struct tcb_s *)dq_remfirst(list_pendingtasks());
-
-          /* Add the pending task to the correct ready-to-run list. */
+          cpu  = nxsched_select_cpu(ptcb->affinity);
+          if (cpu >= CONFIG_SMP_NCPUS)
+            {
+              /* No available CPUs to run this task, try next one */
 
-          ret |= nxsched_add_readytorun(tcb);
+              continue;

Review Comment:
   This is a bug — it would cause an infinite loop (the `continue` re-tests the same `ptcb` without advancing it). Will fix.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

Reply via email to