To keep any issues bisectable, we had been converting the tasklet code piecemeal, layering one piece of functionality on top of another. Now that all of it is per-cpu and working, we can remove the old scaffolding and collapse the functions.

We also remove the 'is_percpu' flag, which is no longer needed. Most of
this is code deletion and code motion; no new functionality is added.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
---
RFC: First version
v1: Posted, folks asked if ticketlocks fixed it.
v2: Intel confirmed at XPDS 2016 that the problem is still present with
    large guests.

Cc: Jan Beulich <jbeul...@suse.com>
Cc: Andrew Cooper <andrew.coop...@citrix.com>
Cc: "Lan, Tianyu" <tianyu....@intel.com>
Cc: Kevin Tian <kevin.t...@intel.com>
Cc: Jun Nakajima <jun.nakaj...@intel.com>
Cc: George Dunlap <george.dun...@eu.citrix.com>
Cc: Ian Jackson <ian.jack...@eu.citrix.com>
Cc: Stefano Stabellini <sstabell...@kernel.org>
Cc: Tim Deegan <t...@xen.org>
Cc: Wei Liu <wei.l...@citrix.com>
---
 xen/common/tasklet.c      | 110 ++++++++++++++++++----------------------
 xen/include/xen/tasklet.h |   9 ++--
 2 files changed, 46 insertions(+), 73 deletions(-)

diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index 618e73b..192fa79 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -49,7 +49,6 @@ static void percpu_tasklet_feed(void *arg)
     while ( !list_empty(list) )
     {
         t = list_entry(list->next, struct tasklet, list);
-        BUG_ON(!t->is_percpu);
         list_del(&t->list);
 
         if ( t->is_softirq )
@@ -76,59 +75,44 @@ out:
 
 static void tasklet_enqueue(struct tasklet *t)
 {
     unsigned int cpu = t->scheduled_on;
+    unsigned long flags;
+    struct list_head *list;
 
-    if ( t->is_percpu )
-    {
-        unsigned long flags;
-        struct list_head *list;
-
-        INIT_LIST_HEAD(&t->list);
-
-        if ( cpu != smp_processor_id() )
-        {
-            spin_lock_irqsave(&feeder_lock, flags);
-
-            list = &per_cpu(tasklet_feeder, cpu);
-            list_add_tail(&t->list, list);
-
-            spin_unlock_irqrestore(&feeder_lock, flags);
-            on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
-            return;
-        }
-        if ( t->is_softirq )
-        {
-
-            local_irq_save(flags);
-
-            list = &__get_cpu_var(softirq_list);
-            list_add_tail(&t->list, list);
-            raise_softirq(TASKLET_SOFTIRQ);
+    INIT_LIST_HEAD(&t->list);
 
-            local_irq_restore(flags);
-            return;
-        }
-        else
-        {
-            unsigned long *work_to_do = &__get_cpu_var(tasklet_work_to_do);
+    if ( cpu != smp_processor_id() )
+    {
+        spin_lock_irqsave(&feeder_lock, flags);
 
-            local_irq_save(flags);
+        list = &per_cpu(tasklet_feeder, cpu);
+        list_add_tail(&t->list, list);
 
-            list = &__get_cpu_var(tasklet_list);
-            list_add_tail(&t->list, list);
-            if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
-                raise_softirq(SCHEDULE_SOFTIRQ);
+        spin_unlock_irqrestore(&feeder_lock, flags);
+        on_selected_cpus(cpumask_of(cpu), percpu_tasklet_feed, NULL, 1);
+        return;
+    }
+    if ( t->is_softirq )
+    {
+        local_irq_save(flags);
+
+        list = &__get_cpu_var(softirq_list);
+        list_add_tail(&t->list, list);
+        raise_softirq(TASKLET_SOFTIRQ);
+
+        local_irq_restore(flags);
+    }
+    else
+    {
+        unsigned long *work_to_do = &__get_cpu_var(tasklet_work_to_do);
+
+        local_irq_save(flags);
+
+        list = &__get_cpu_var(tasklet_list);
+        list_add_tail(&t->list, list);
+        if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
+            raise_softirq(SCHEDULE_SOFTIRQ);
 
-            local_irq_restore(flags);
-            return;
-        }
-    }
-    if ( t->is_softirq )
-    {
-        BUG();
-    }
-    else
-    {
-        BUG();
+        local_irq_restore(flags);
     }
 }
 
@@ -137,16 +121,11 @@ void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
     if ( !tasklets_initialised || t->is_dead )
         return;
 
-    if ( t->is_percpu )
+    if ( !test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
     {
-        if ( !test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
-        {
-            t->scheduled_on = cpu;
-            tasklet_enqueue(t);
-        }
-        return;
+        t->scheduled_on = cpu;
+        tasklet_enqueue(t);
     }
-    BUG();
 }
 
 void tasklet_schedule(struct tasklet *t)
@@ -306,19 +285,15 @@ static void tasklet_softirq_action(void)
 
 void tasklet_kill(struct tasklet *t)
 {
-    if ( t->is_percpu )
+    while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
     {
-        while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
-        {
-            do {
+        do {
                 process_pending_softirqs();
-            } while ( test_bit(TASKLET_STATE_SCHED, &t->state) );
-        }
-        tasklet_unlock_wait(t);
-        clear_bit(TASKLET_STATE_SCHED, &t->state);
-        t->is_dead = 1;
-        return;
+        } while ( test_bit(TASKLET_STATE_SCHED, &t->state) );
     }
+    tasklet_unlock_wait(t);
+    clear_bit(TASKLET_STATE_SCHED, &t->state);
+    t->is_dead = 1;
 }
 
 static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list)
@@ -348,7 +323,6 @@ void tasklet_init(
     t->scheduled_on = -1;
     t->func = func;
     t->data = data;
-    t->is_percpu = 1;
 }
 
 void softirq_tasklet_init(
diff --git a/xen/include/xen/tasklet.h b/xen/include/xen/tasklet.h
index 21efe7b..b7a6a81 100644
--- a/xen/include/xen/tasklet.h
+++ b/xen/include/xen/tasklet.h
@@ -22,19 +22,18 @@ struct tasklet
     bool_t is_softirq;
     bool_t is_running;
     bool_t is_dead;
-    bool_t is_percpu;
     void (*func)(unsigned long);
     unsigned long data;
 };
 
-#define _DECLARE_TASKLET(name, func, data, softirq, percpu)     \
+#define _DECLARE_TASKLET(name, func, data, softirq)             \
     struct tasklet name = {                                     \
-        LIST_HEAD_INIT(name.list), 0, -1, softirq, 0, 0, percpu, \
+        LIST_HEAD_INIT(name.list), 0, -1, softirq, 0, 0,        \
         func, data }
 #define DECLARE_TASKLET(name, func, data)               \
-    _DECLARE_TASKLET(name, func, data, 0, 0)
+    _DECLARE_TASKLET(name, func, data, 0)
 #define DECLARE_SOFTIRQ_TASKLET(name, func, data)       \
-    _DECLARE_TASKLET(name, func, data, 1, 1)
+    _DECLARE_TASKLET(name, func, data, 1)
 
 /* Indicates status of tasklet work on each CPU. */
 DECLARE_PER_CPU(unsigned long, tasklet_work_to_do);
-- 
2.4.11
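
For reviewers, a minimal usage sketch of the API this patch leaves in
place (not part of the patch itself; demo_fn, demo_schedule() and
demo_teardown() are made-up names, while the declaration macro and the
tasklet_* calls are the real ones touched above):

/* Hypothetical caller of the now always-per-cpu tasklet API. */
#include <xen/tasklet.h>

/* Runs in TASKLET_SOFTIRQ context on whichever CPU it was scheduled on. */
static void demo_fn(unsigned long data)
{
}

/* After this patch the macro takes no 'percpu' argument; is_softirq = 1. */
static DECLARE_SOFTIRQ_TASKLET(demo_tasklet, demo_fn, 0);

static void demo_schedule(unsigned int cpu)
{
    /*
     * If cpu is remote, tasklet_enqueue() puts the tasklet on that CPU's
     * tasklet_feeder list under feeder_lock and kicks it via
     * on_selected_cpus(); if local, it goes straight onto softirq_list
     * and TASKLET_SOFTIRQ is raised.
     */
    tasklet_schedule_on_cpu(&demo_tasklet, cpu);
}

static void demo_teardown(void)
{
    /* Waits for any pending or running instance, then marks it dead. */
    tasklet_kill(&demo_tasklet);
}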