xiaoxiang781216 commented on code in PR #16673:
URL: https://github.com/apache/nuttx/pull/16673#discussion_r2229245524
########## sched/sched/sched_unlock.c: ##########
@@ -81,18 +89,28 @@ void sched_unlock(void)
       sched_note_preemption(rtcb, false);

       /* Release any ready-to-run tasks that have collected in
-       * g_pendingtasks.
+       * g_pendingtasks (or in g_readytorun for SMP)
        *
        * NOTE: This operation has a very high likelihood of causing
        * this task to be switched out!
        */

-      if (list_pendingtasks()->head != NULL)
+#ifdef CONFIG_SMP
+      /* If this task is the lowest priority task running across all
+       * CPUs, there may be some higher priority task pending because
+       * of the sched lock.
+       */
+
+      ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun());
+      if (ptcb && nxsched_select_cpu(ALL_CPUS) == rtcb->cpu &&
+          ptcb->sched_priority > rtcb->sched_priority &&
+          nxsched_deliver_task(rtcb->cpu, rtcb->cpu))

Review Comment: Why not call nxsched_switch_running() directly?

########## sched/sched/sched_unlock.c: ##########
@@ -81,18 +89,28 @@ void sched_unlock(void)
       sched_note_preemption(rtcb, false);

       /* Release any ready-to-run tasks that have collected in
-       * g_pendingtasks.
+       * g_pendingtasks (or in g_readytorun for SMP)
        *
        * NOTE: This operation has a very high likelihood of causing
        * this task to be switched out!
        */

-      if (list_pendingtasks()->head != NULL)
+#ifdef CONFIG_SMP
+      /* If this task is the lowest priority task running across all
+       * CPUs, there may be some higher priority task pending because
+       * of the sched lock.
+       */
+
+      ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun());
+      if (ptcb && nxsched_select_cpu(ALL_CPUS) == rtcb->cpu &&
+          ptcb->sched_priority > rtcb->sched_priority &&
+          nxsched_deliver_task(rtcb->cpu, rtcb->cpu))
+#else
+      ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks());
+      if (ptcb && nxsched_merge_pending())

Review Comment: Why do we need to check ptcb before calling nxsched_merge_pending()?

########## sched/sched/sched.h: ##########
@@ -518,23 +521,51 @@ static inline_function bool nxsched_add_prioritized(FAR struct tcb_s *tcb,
 }

 # ifdef CONFIG_SMP
+
+/* Try to switch the head of the ready-to-run list to active on "cpu".
+ * "curr_cpu" is "this_cpu()", and passed only for optimization.
+ */
+
+static inline_function bool nxsched_deliver_task(int cpu, int curr_cpu)

Review Comment: Let's change the arguments to (int cpu, int target_cpu).

########## sched/sched/sched_suspend.c: ##########
@@ -172,9 +176,13 @@ void nxsched_suspend(FAR struct tcb_s *tcb)
     {
       switch_needed = nxsched_remove_readytorun(tcb);

-      if (list_pendingtasks()->head)
+      if (!nxsched_islocked_tcb(rtcb) || switch_needed)

Review Comment: Why not check switch_needed first?

########## sched/sched/sched.h: ##########
@@ -518,23 +514,51 @@ static inline_function bool nxsched_add_prioritized(FAR struct tcb_s *tcb,
 }

 # ifdef CONFIG_SMP
+
+/* Try to switch the head of the ready-to-run list to active on "cpu".
+ * "curr_cpu" is "this_cpu()", and passed only for optimization.
+ */
+
+static inline_function bool nxsched_deliver_task(int cpu, int curr_cpu)
+{
+  bool ret = false;
+
+  /* If there is already a schedule interrupt pending, there is
+   * no need to do anything now.
+   */
+
+  if (!g_delivertasks[cpu])

Review Comment: Should we change this to:

```
if (cpu == curr_cpu)
  {
    ret = nxsched_switch_running(cpu);
  }
else if (!g_delivertasks[cpu])
  {
    g_delivertasks[cpu] = true;
    up_send_smp_sched(cpu);
  }
```
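For illustration only, here is a minimal sketch of what nxsched_deliver_task() could look like with that restructure folded in. It assumes, as the snippet above implies, that nxsched_switch_running(cpu) returns a bool indicating whether a switch was actually performed, and that g_delivertasks[] and up_send_smp_sched() keep the roles shown in the diff; the return value on the remote-CPU path is an assumption, not the PR's actual code:

```
/* Illustrative sketch only -- not the PR's implementation.
 * Assumption: nxsched_switch_running(cpu) returns true when the head of
 * the ready-to-run list was switched in on "cpu".
 */

static inline_function bool nxsched_deliver_task(int cpu, int curr_cpu)
{
  bool ret = false;

  if (cpu == curr_cpu)
    {
      /* Delivering to the current CPU: switch the running task here and
       * now instead of queuing a schedule interrupt for ourselves.
       */

      ret = nxsched_switch_running(cpu);
    }
  else if (!g_delivertasks[cpu])
    {
      /* No schedule interrupt is pending for the remote CPU yet: mark one
       * pending and kick that CPU.  (Assumed: the caller needs no
       * synchronous result in this case, so ret stays false.)
       */

      g_delivertasks[cpu] = true;
      up_send_smp_sched(cpu);
    }

  return ret;
}
```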
########## sched/sched/sched_unlock.c: ##########
@@ -81,18 +89,28 @@ void sched_unlock(void)
       sched_note_preemption(rtcb, false);

       /* Release any ready-to-run tasks that have collected in
-       * g_pendingtasks.
+       * g_pendingtasks (or in g_readytorun for SMP)
        *
        * NOTE: This operation has a very high likelihood of causing
        * this task to be switched out!
        */

-      if (list_pendingtasks()->head != NULL)
+#ifdef CONFIG_SMP
+      /* If this task is the lowest priority task running across all
+       * CPUs, there may be some higher priority task pending because
+       * of the sched lock.
+       */
+
+      ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun());
+      if (ptcb && nxsched_select_cpu(ALL_CPUS) == rtcb->cpu &&

Review Comment: Should we pass the affinity mask of ptcb instead of ALL_CPUS?
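For reference, a sketch of how the condition would read with that change, assuming ptcb->affinity is the pending task's CPU affinity set; the switch-out body is elided in the hunk above and shown only as a placeholder:

```
/* Sketch only: pick the CPU from ptcb's own affinity set instead of
 * ALL_CPUS, so the lowest-priority CPU is chosen only among the CPUs
 * the pending task may actually run on.  "ptcb->affinity" is assumed
 * to be the tcb's affinity mask.
 */

ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun());
if (ptcb && nxsched_select_cpu(ptcb->affinity) == rtcb->cpu &&
    ptcb->sched_priority > rtcb->sched_priority &&
    nxsched_deliver_task(rtcb->cpu, rtcb->cpu))
  {
    /* ...existing switch-out path, unchanged... */
  }
```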