xiaoxiang781216 commented on code in PR #16673: URL: https://github.com/apache/nuttx/pull/16673#discussion_r2234004537
########## sched/sched/sched.h: ########## @@ -558,8 +603,8 @@ static inline_function int nxsched_select_cpu(cpu_set_t affinity) } } - DEBUGASSERT(cpu != 0xff); return cpu; } + Review Comment: revert the change ########## sched/init/nx_start.c: ########## @@ -141,7 +129,9 @@ FAR struct tcb_s *g_running_tasks[CONFIG_SMP_NCPUS]; * currently active task has disabled pre-emption. */ +#ifndef CONFIG_SMP Review Comment: move to previous patch ########## sched/sched/sched.h: ########## @@ -132,6 +134,17 @@ struct tasklist_s uint8_t attr; /* List attribute flags */ }; +/* This enumeration defines smp schedule task switch rule */ + +enum task_deliver_e +{ + NOT_ASSIGNED = 0, /* No schedule switch pending */ + SWITCH_HIGHER = 1, /* Higher priority task needs to be scheduled in */ + SWITCH_EQUAL /* Higher or equal priority task needs to be scheduled Review Comment: why need distinguish higher or equal ########## sched/sched/sched_addreadytorun.c: ########## @@ -114,170 +114,164 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) return ret; } -#endif /* !CONFIG_SMP */ + +#else /* !CONFIG_SMP */ /**************************************************************************** - * Name: nxsched_add_readytorun + * Name: nxsched_switch_running * * Description: - * This function adds a TCB to one of the ready to run lists. That might - * be: - * - * 1. The g_readytorun list if the task is ready-to-run but not running - * and not assigned to a CPU. - * 2. The g_assignedtask[cpu] list if the task is running or if has been - * assigned to a CPU. - * - * If the currently active task has preemption disabled and the new TCB - * would cause this task to be preempted, the new task is added to the - * g_pendingtasks list instead. The pending tasks will be made - * ready-to-run when preemption isunlocked. + * This function switches the head of the current CPU's assigned tasks + * list to the TCB given as parameter. The idle task is not switched out. 
+ * If the running task can't be swapped out, the btcb is pushed to + * the ready-to-run list. * * Input Parameters: - * btcb - Points to the blocked TCB that is ready-to-run + * cpu - Always this_cpu(). Given as argument only for + * optimization + * switch_equal - When true, switch away a task of equal priority compared + * to the pending one * * Returned Value: - * true if the currently active task (the head of the ready-to-run list) - * has changed. + * true if the currently active task is switched to the btcb * * Assumptions: - * - The caller has established a critical section before calling this - * function (calling sched_lock() first is NOT a good idea -- use - * enter_critical_section()). + * - The caller has established a critical section * - The caller has already removed the input rtcb from whatever list it * was in. * - The caller handles the condition that occurs if the head of the - * ready-to-run list has changed. + * assigned tasks list has changed. * ****************************************************************************/ -#ifdef CONFIG_SMP -bool nxsched_add_readytorun(FAR struct tcb_s *btcb) +bool nxsched_switch_running(int cpu, bool switch_equal) { - FAR struct tcb_s *rtcb; - FAR struct tcb_s *headtcb; - FAR dq_queue_t *tasklist; - bool doswitch; - int task_state; - int cpu; - int me; - - cpu = nxsched_select_cpu(btcb->affinity); + FAR struct tcb_s *rtcb = current_task(cpu); + int sched_priority = rtcb->sched_priority; + FAR struct tcb_s *btcb; + bool ret = false; - /* Get the task currently running on the CPU (may be the IDLE task) */ + DEBUGASSERT(cpu == this_cpu()); - rtcb = current_task(cpu); - - /* Determine the desired new task state. First, if the new task priority - * is higher then the priority of the lowest priority, running task, then - * the new task will be running and a context switch switch will be - * required. 
- */ - - if (rtcb->sched_priority < btcb->sched_priority) + if (nxsched_islocked_tcb(rtcb)) { - task_state = TSTATE_TASK_RUNNING; + return false; } - else + + if (switch_equal) { - task_state = TSTATE_TASK_READYTORUN; + sched_priority--; } - /* If the selected state is TSTATE_TASK_RUNNING, then we would like to - * start running the task. Be we cannot do that if pre-emption is - * disabled. If the selected state is TSTATE_TASK_READYTORUN, then it - * should also go to the pending task list so that it will have a chance - * to be restarted when the scheduler is unlocked. - * - * There is an interaction here with IRQ locking. Even if the pre- - * emption is enabled, tasks will be forced to pend if the IRQ lock - * is also set UNLESS the CPU starting the thread is also the holder of - * the IRQ lock. irq_cpu_locked() performs an atomic check for that - * situation. + /* If there is a task in readytorun list, which is eglible to run on this + * CPU, and has higher priority than the current task, + * switch the current task to that one. */ - if (nxsched_islocked_tcb(this_task())) + for (btcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); + btcb && btcb->sched_priority > sched_priority; + btcb = btcb->flink) { - /* Add the new ready-to-run task to the g_pendingtasks task list for - * now. + /* Check if the task found in ready-to-run list is allowed to run on + * this CPU */ - nxsched_add_prioritized(btcb, list_pendingtasks()); - btcb->task_state = TSTATE_TASK_PENDING; - doswitch = false; - } - else if (task_state == TSTATE_TASK_READYTORUN) - { - /* The new btcb was added either (1) in the middle of the assigned - * task list (the btcb->cpu field is already valid) or (2) was - * added to the ready-to-run list (the btcb->cpu field does not - * matter). Either way, it won't be running. 
- * - * Add the task to the ready-to-run (but not running) task list - */ + if (CPU_ISSET(cpu, &btcb->affinity)) + { + FAR dq_queue_t *tasklist = list_assignedtasks(cpu); - nxsched_add_prioritized(btcb, list_readytorun()); + /* Found a task, remove it from ready-to-run list */ - btcb->task_state = TSTATE_TASK_READYTORUN; - doswitch = false; - } - else /* (task_state == TSTATE_TASK_RUNNING) */ - { - /* If we are modifying some assigned task list other than our own, we - * will need to switch that CPU. - */ + dq_rem((FAR struct dq_entry_s *)btcb, list_readytorun()); - me = this_cpu(); - if (cpu != me) - { - if (g_delivertasks[cpu] == NULL) + /* Remove the current task from assigned tasks list and put it + * to the ready-to-run. But leave idle task. + */ + + if (!is_idle_task(rtcb)) { - g_delivertasks[cpu] = btcb; - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - up_send_smp_sched(cpu); + dq_remfirst(tasklist); + rtcb->task_state = TSTATE_TASK_READYTORUN; + nxsched_add_prioritized(rtcb, list_readytorun()); + + /* We should now have only the idle task assigned */ + + DEBUGASSERT( + is_idle_task((FAR struct tcb_s *)dq_peek(tasklist))); } else { - rtcb = g_delivertasks[cpu]; - if (rtcb->sched_priority < btcb->sched_priority) - { - g_delivertasks[cpu] = btcb; - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - nxsched_add_prioritized(rtcb, &g_readytorun); - rtcb->task_state = TSTATE_TASK_READYTORUN; - } - else - { - nxsched_add_prioritized(btcb, &g_readytorun); - btcb->task_state = TSTATE_TASK_READYTORUN; - } + rtcb->task_state = TSTATE_TASK_ASSIGNED; } - return false; - } - - tasklist = &g_assignedtasks[cpu]; - - /* Change "head" from TSTATE_TASK_RUNNING to TSTATE_TASK_ASSIGNED */ + dq_addfirst((FAR dq_entry_t *)btcb, tasklist); + up_update_task(btcb); - headtcb = (FAR struct tcb_s *)tasklist->head; - DEBUGASSERT(headtcb->task_state == TSTATE_TASK_RUNNING); - headtcb->task_state = TSTATE_TASK_ASSIGNED; + btcb->cpu = cpu; + btcb->task_state = 
TSTATE_TASK_RUNNING; + ret = true; + break; + } + } - /* Add btcb to the head of the g_assignedtasks - * task list and mark it as running - */ + return ret; +} - dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist); - up_update_task(btcb); +/**************************************************************************** + * Name: nxsched_add_readytorun + * + * Description: + * This function adds a TCB to one of the ready to run lists. The list + * will be: + * + * 1. The g_readytorun list if the task is ready-to-run but not running + * and not assigned to a CPU. + * 2. The g_assignedtask[cpu] list if the task is running or if has been + * assigned to a CPU. + * + * If the currently active task has preemption disabled and the new TCB + * would cause this task to be preempted, the new task is added to the + * g_pendingtasks list instead. The pending tasks will be made + * ready-to-run when preemption isunlocked. Review Comment: is unlocked ########## sched/sched/sched_process_delivered.c: ########## @@ -62,83 +62,43 @@ void nxsched_process_delivered(int cpu) { - FAR dq_queue_t *tasklist; - FAR struct tcb_s *next; - FAR struct tcb_s *prev; - struct tcb_s *btcb = NULL; - struct tcb_s *tcb; - DEBUGASSERT(g_cpu_nestcount[cpu] == 0); DEBUGASSERT(up_interrupt_context()); + enum task_deliver_e priority = g_delivertasks[cpu]; Review Comment: move before line 65 ########## sched/sched/sched_addreadytorun.c: ########## @@ -178,33 +178,23 @@ bool nxsched_switch_running(int cpu, bool switch_equal) if (CPU_ISSET(cpu, &btcb->affinity)) { - FAR dq_queue_t *tasklist = list_assignedtasks(cpu); - /* Found a task, remove it from ready-to-run list */ dq_rem((FAR struct dq_entry_s *)btcb, list_readytorun()); - /* Remove the current task from assigned tasks list and put it - * to the ready-to-run. But leave idle task. 
- */ - if (!is_idle_task(rtcb)) { - dq_remfirst(tasklist); + /* Put currently running task back to ready-to-run list */ + rtcb->task_state = TSTATE_TASK_READYTORUN; nxsched_add_prioritized(rtcb, list_readytorun()); - - /* We should now have only the idle task assigned */ Review Comment: let's don't add DEBUGASSERT in previous patch directly ########## sched/sched/sched_addreadytorun.c: ########## @@ -114,170 +114,164 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) return ret; } -#endif /* !CONFIG_SMP */ + +#else /* !CONFIG_SMP */ /**************************************************************************** - * Name: nxsched_add_readytorun + * Name: nxsched_switch_running * * Description: - * This function adds a TCB to one of the ready to run lists. That might - * be: - * - * 1. The g_readytorun list if the task is ready-to-run but not running - * and not assigned to a CPU. - * 2. The g_assignedtask[cpu] list if the task is running or if has been - * assigned to a CPU. - * - * If the currently active task has preemption disabled and the new TCB - * would cause this task to be preempted, the new task is added to the - * g_pendingtasks list instead. The pending tasks will be made - * ready-to-run when preemption isunlocked. + * This function switches the head of the current CPU's assigned tasks + * list to the TCB given as parameter. The idle task is not switched out. + * If the running task can't be swapped out, the btcb is pushed to + * the ready-to-run list. * * Input Parameters: - * btcb - Points to the blocked TCB that is ready-to-run + * cpu - Always this_cpu(). Given as argument only for + * optimization + * switch_equal - When true, switch away a task of equal priority compared + * to the pending one * * Returned Value: - * true if the currently active task (the head of the ready-to-run list) - * has changed. 
+ * true if the currently active task is switched to the btcb * * Assumptions: - * - The caller has established a critical section before calling this - * function (calling sched_lock() first is NOT a good idea -- use - * enter_critical_section()). + * - The caller has established a critical section * - The caller has already removed the input rtcb from whatever list it * was in. * - The caller handles the condition that occurs if the head of the - * ready-to-run list has changed. + * assigned tasks list has changed. * ****************************************************************************/ -#ifdef CONFIG_SMP -bool nxsched_add_readytorun(FAR struct tcb_s *btcb) +bool nxsched_switch_running(int cpu, bool switch_equal) { - FAR struct tcb_s *rtcb; - FAR struct tcb_s *headtcb; - FAR dq_queue_t *tasklist; - bool doswitch; - int task_state; - int cpu; - int me; - - cpu = nxsched_select_cpu(btcb->affinity); + FAR struct tcb_s *rtcb = current_task(cpu); + int sched_priority = rtcb->sched_priority; + FAR struct tcb_s *btcb; + bool ret = false; - /* Get the task currently running on the CPU (may be the IDLE task) */ + DEBUGASSERT(cpu == this_cpu()); - rtcb = current_task(cpu); - - /* Determine the desired new task state. First, if the new task priority - * is higher then the priority of the lowest priority, running task, then - * the new task will be running and a context switch switch will be - * required. - */ - - if (rtcb->sched_priority < btcb->sched_priority) + if (nxsched_islocked_tcb(rtcb)) { - task_state = TSTATE_TASK_RUNNING; + return false; } - else + + if (switch_equal) { - task_state = TSTATE_TASK_READYTORUN; + sched_priority--; } - /* If the selected state is TSTATE_TASK_RUNNING, then we would like to - * start running the task. Be we cannot do that if pre-emption is - * disabled. 
If the selected state is TSTATE_TASK_READYTORUN, then it - * should also go to the pending task list so that it will have a chance - * to be restarted when the scheduler is unlocked. - * - * There is an interaction here with IRQ locking. Even if the pre- - * emption is enabled, tasks will be forced to pend if the IRQ lock - * is also set UNLESS the CPU starting the thread is also the holder of - * the IRQ lock. irq_cpu_locked() performs an atomic check for that - * situation. + /* If there is a task in readytorun list, which is eglible to run on this + * CPU, and has higher priority than the current task, + * switch the current task to that one. */ - if (nxsched_islocked_tcb(this_task())) + for (btcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); + btcb && btcb->sched_priority > sched_priority; + btcb = btcb->flink) { - /* Add the new ready-to-run task to the g_pendingtasks task list for - * now. + /* Check if the task found in ready-to-run list is allowed to run on + * this CPU */ - nxsched_add_prioritized(btcb, list_pendingtasks()); - btcb->task_state = TSTATE_TASK_PENDING; - doswitch = false; - } - else if (task_state == TSTATE_TASK_READYTORUN) - { - /* The new btcb was added either (1) in the middle of the assigned - * task list (the btcb->cpu field is already valid) or (2) was - * added to the ready-to-run list (the btcb->cpu field does not - * matter). Either way, it won't be running. - * - * Add the task to the ready-to-run (but not running) task list - */ + if (CPU_ISSET(cpu, &btcb->affinity)) + { + FAR dq_queue_t *tasklist = list_assignedtasks(cpu); Review Comment: why need add local variable for assigned task, but not for ready to run? 
########## sched/init/nx_start.c: ########## @@ -420,11 +399,11 @@ static void idle_task_initialize(void) */ #ifdef CONFIG_SMP - tasklist = TLIST_HEAD(tcb, i); + g_assignedtasks[i] = tcb; #else tasklist = TLIST_HEAD(tcb); -#endif dq_addfirst((FAR dq_entry_t *)tcb, tasklist); Review Comment: let's change `tasklist` to `TLIST_HEAD(tcb)` and remove line 325 directly -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: commits-unsubscribe@nuttx.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org