Fix-Point commented on code in PR #16231: URL: https://github.com/apache/nuttx/pull/16231#discussion_r2074630963
########## sched/wqueue/kwork_queue.c: ########## @@ -141,65 +80,72 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue, FAR void *arg, clock_t delay, clock_t period) { irqstate_t flags; - int ret = OK; + clock_t expected; + bool wake = false; + int ret = OK; if (wqueue == NULL || work == NULL || worker == NULL) { return -EINVAL; } + /* delay+1 is to prevent the insufficient sleep time if we are + * currently near the boundary to the next tick. + * | current_tick | current_tick + 1 | current_tick + 2 | .... | + * | ^ Here we get the current tick + * In this case we delay 1 tick, timer will be triggered at + * current_tick + 1, which is not enough for at least 1 tick. + */ + + expected = clock_systime_ticks() + delay + 1; + /* Interrupts are disabled so that this logic can be called from with * task logic or from interrupt handling logic. */ flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - /* Remove the entry from the timer and work queue. */ + /* Check whether we own the work structure. */ - if (work->worker != NULL) + if (!work_available(work)) { - /* Remove the entry from the work queue and make sure that it is - * marked as available (i.e., the worker field is nullified). - */ - - work->worker = NULL; - wd_cancel(&work->u.timer); - - list_delete(&work->u.s.node); - } + /* Seize the ownership from the work thread. */ - if (work_is_canceling(wqueue->worker, wqueue->nthreads, work)) - { - goto out; + list_delete(&work->node); Review Comment: We can check whether `curr_node` is in the list, which is cache-friendly and saves one memory access compared to calling `list_is_head(&wqueue->pending, &work->node)`. 
########## sched/wqueue/kwork_queue.c: ########## @@ -141,65 +80,72 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue, FAR void *arg, clock_t delay, clock_t period) { irqstate_t flags; - int ret = OK; + clock_t expected; + bool wake = false; + int ret = OK; if (wqueue == NULL || work == NULL || worker == NULL) { return -EINVAL; } + /* delay+1 is to prevent the insufficient sleep time if we are + * currently near the boundary to the next tick. + * | current_tick | current_tick + 1 | current_tick + 2 | .... | + * | ^ Here we get the current tick + * In this case we delay 1 tick, timer will be triggered at + * current_tick + 1, which is not enough for at least 1 tick. + */ + + expected = clock_systime_ticks() + delay + 1; + /* Interrupts are disabled so that this logic can be called from with * task logic or from interrupt handling logic. */ flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - /* Remove the entry from the timer and work queue. */ + /* Check whether we own the work structure. */ - if (work->worker != NULL) + if (!work_available(work)) { - /* Remove the entry from the work queue and make sure that it is - * marked as available (i.e., the worker field is nullified). - */ - - work->worker = NULL; - wd_cancel(&work->u.timer); - - list_delete(&work->u.s.node); - } + /* Seize the ownership from the work thread. */ - if (work_is_canceling(wqueue->worker, wqueue->nthreads, work)) - { - goto out; + list_delete(&work->node); Review Comment: We can check whether `curr_node` is in the list, which is cache-friendly and saves one memory access compared to calling `list_is_head(&wqueue->pending, &work->node)`. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org