Starting with kernel 3.4, the netdevice structure has a delayed_work embedded in net_device->pm_qos_req. OVS has its own workqueue implementation that redefines delayed_work, so we can no longer redefine delayed_work in OVS. The following patch therefore turns the compat workqueue into an OVS-specific workqueue library.
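For context, a minimal sketch of how a caller uses the renamed API (the work item, callback, and interval below are hypothetical illustrations, not part of the patch; it also assumes ovs_workqueues_init() has already started the worker thread):

#include "ovs_workqueue.h"

#define EXAMPLE_INTERVAL (60 * HZ)	/* hypothetical interval */

static void example_cb(struct ovs_work_struct *work);
static OVS_DECLARE_DELAYED_WORK(example_work, example_cb);

/* Runs on the single OVS worker thread and re-arms itself. */
static void example_cb(struct ovs_work_struct *work)
{
	ovs_schedule_delayed_work(&example_work, EXAMPLE_INTERVAL);
}

static int example_start(void)
{
	/* Returns 0 if the work item was already pending. */
	return ovs_schedule_delayed_work(&example_work, EXAMPLE_INTERVAL);
}

static void example_stop(void)
{
	/* Waits for a running callback to finish and clears the pending bit. */
	ovs_cancel_delayed_work_sync(&example_work);
}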
Signed-off-by: Pravin B Shelar <pshe...@nicira.com>
---
 datapath/Modules.mk                             |    4 +-
 datapath/datapath.c                             |   13 +++--
 datapath/linux/Modules.mk                       |    5 +-
 datapath/linux/compat/include/linux/workqueue.h |   69 -----------------------
 datapath/ovs_workqueue.h                        |   56 ++++++++++++++++++
 datapath/tunnel.c                               |   12 ++--
 datapath/{linux/compat => }/workqueue.c         |   60 ++++++++++++--------
 7 files changed, 111 insertions(+), 108 deletions(-)
 delete mode 100644 datapath/linux/compat/include/linux/workqueue.h
 create mode 100644 datapath/ovs_workqueue.h
 rename datapath/{linux/compat => }/workqueue.c (67%)

diff --git a/datapath/Modules.mk b/datapath/Modules.mk
index 24c1075..96c5400 100644
--- a/datapath/Modules.mk
+++ b/datapath/Modules.mk
@@ -26,7 +26,8 @@ openvswitch_sources = \
 	vport-gre.c \
 	vport-internal_dev.c \
 	vport-netdev.c \
-	vport-patch.c
+	vport-patch.c \
+	workqueue.c
 
 openvswitch_headers = \
 	checksum.h \
@@ -35,6 +36,7 @@ openvswitch_headers = \
 	dp_sysfs.h \
 	flow.h \
 	genl_exec.h \
+	ovs_workqueue.h \
 	tunnel.h \
 	vlan.h \
 	vport.h \
diff --git a/datapath/datapath.c b/datapath/datapath.c
index 605253d..71d8da8 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -56,6 +56,7 @@
 #include "datapath.h"
 #include "flow.h"
 #include "genl_exec.h"
+#include "ovs_workqueue.h"
 #include "vlan.h"
 #include "tunnel.h"
 #include "vport-internal_dev.h"
@@ -66,8 +67,8 @@
 #endif
 
 #define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+static void rehash_flow_table(struct ovs_work_struct *work);
+static OVS_DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
 
 int ovs_net_id __read_mostly;
 
@@ -2132,10 +2133,10 @@ static int __rehash_flow_table(void *dummy)
 	return 0;
 }
 
-static void rehash_flow_table(struct work_struct *work)
+static void rehash_flow_table(struct ovs_work_struct *work)
 {
 	genl_exec(__rehash_flow_table, NULL);
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+	ovs_schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
 }
 
 static int dp_destroy_all(void *data)
@@ -2213,7 +2214,7 @@ static int __init dp_init(void)
 	if (err < 0)
 		goto error_unreg_notifier;
 
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+	ovs_schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
 
 	return 0;
 
@@ -2237,7 +2238,7 @@ error:
 
 static void dp_cleanup(void)
 {
-	cancel_delayed_work_sync(&rehash_flow_wq);
+	ovs_cancel_delayed_work_sync(&rehash_flow_wq);
 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
 	unregister_pernet_device(&ovs_net_ops);
diff --git a/datapath/linux/Modules.mk b/datapath/linux/Modules.mk
index 8ce6115..db09f0e 100644
--- a/datapath/linux/Modules.mk
+++ b/datapath/linux/Modules.mk
@@ -10,8 +10,8 @@ openvswitch_sources += \
 	linux/compat/net_namespace.c \
 	linux/compat/reciprocal_div.c \
 	linux/compat/skbuff-openvswitch.c \
-	linux/compat/time.c \
-	linux/compat/workqueue.c
+	linux/compat/time.c
+
 openvswitch_headers += \
 	linux/compat/include/linux/compiler.h \
 	linux/compat/include/linux/compiler-gcc.h \
@@ -55,7 +55,6 @@ openvswitch_headers += \
 	linux/compat/include/linux/types.h \
 	linux/compat/include/linux/u64_stats_sync.h \
 	linux/compat/include/linux/udp.h \
-	linux/compat/include/linux/workqueue.h \
 	linux/compat/include/net/checksum.h \
 	linux/compat/include/net/dst.h \
 	linux/compat/include/net/genetlink.h \
diff --git a/datapath/linux/compat/include/linux/workqueue.h b/datapath/linux/compat/include/linux/workqueue.h
deleted file mode 100644
index 79158f9..0000000
--- a/datapath/linux/compat/include/linux/workqueue.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef __LINUX_WORKQUEUE_WRAPPER_H
-#define __LINUX_WORKQUEUE_WRAPPER_H 1
-
-#include <linux/timer.h>
-
-int __init ovs_workqueues_init(void);
-void ovs_workqueues_exit(void);
-
-/* Older kernels have an implementation of work queues with some very bad
- * characteristics when trying to cancel work (potential deadlocks, use after
- * free, etc. Therefore we implement simple ovs specific work queue using
- * single worker thread. work-queue API are kept similar for compatibility.
- * It seems it is useful even on newer kernel. As it can avoid system wide
- * freeze in event of softlockup due to workq blocked on genl_lock.
- */
-
-struct work_struct;
-
-typedef void (*work_func_t)(struct work_struct *work);
-
-#define work_data_bits(work) ((unsigned long *)(&(work)->data))
-
-struct work_struct {
-#define WORK_STRUCT_PENDING 0	/* T if work item pending execution */
-	atomic_long_t data;
-	struct list_head entry;
-	work_func_t func;
-};
-
-#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
-
-#define work_clear_pending(work)				\
-	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
-
-struct delayed_work {
-	struct work_struct work;
-	struct timer_list timer;
-};
-
-#define __WORK_INITIALIZER(n, f) {				\
-	.data = WORK_DATA_INIT(),				\
-	.entry	= { &(n).entry, &(n).entry },			\
-	.func = (f),						\
-}
-
-#define __DELAYED_WORK_INITIALIZER(n, f) {			\
-	.work = __WORK_INITIALIZER((n).work, (f)),		\
-	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
-}
-
-#define DECLARE_DELAYED_WORK(n, f)				\
-	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
-
-#define schedule_delayed_work rpl_schedule_delayed_work
-int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-
-#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
-int cancel_delayed_work_sync(struct delayed_work *dwork);
-
-#define INIT_WORK(_work, _func)					\
-	do {							\
-		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
-		INIT_LIST_HEAD(&(_work)->entry);		\
-		(_work)->func = (_func);			\
-	} while (0)
-
-extern void flush_scheduled_work(void);
-
-#endif
diff --git a/datapath/ovs_workqueue.h b/datapath/ovs_workqueue.h
new file mode 100644
index 0000000..d716215
--- /dev/null
+++ b/datapath/ovs_workqueue.h
@@ -0,0 +1,56 @@
+#ifndef __OVS_WORKQUEUE_H
+#define __OVS_WORKQUEUE_H 1
+
+#include <linux/timer.h>
+
+int __init ovs_workqueues_init(void);
+void ovs_workqueues_exit(void);
+
+/* Older kernels have an implementation of work queues with some very bad
+ * characteristics when trying to cancel work (potential deadlocks, use after
+ * free, etc. Therefore we implement simple ovs specific work queue using
+ * single worker thread. work-queue API are kept similar for compatibility.
+ * It seems it is useful even on newer kernel. As it can avoid system wide
+ * freeze in event of softlockup due to workq blocked on genl_lock.
+ */
+
+struct ovs_work_struct;
+
+typedef void (*ovs_work_func_t)(struct ovs_work_struct *work);
+
+
+struct ovs_work_struct {
+#define OVS_WORK_STRUCT_PENDING 0	/* T if work item pending execution */
+	atomic_long_t data;
+	struct list_head entry;
+	ovs_work_func_t func;
+};
+
+struct ovs_delayed_work {
+	struct ovs_work_struct work;
+	struct timer_list timer;
+};
+
+#define OVS_WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
+
+#define __OVS_WORK_INITIALIZER(n, f) {				\
+	.data = OVS_WORK_DATA_INIT(),				\
+	.entry	= { &(n).entry, &(n).entry },			\
+	.func = (f),						\
+}
+
+#define __OVS_DELAYED_WORK_INITIALIZER(n, f) {			\
+	.work = __OVS_WORK_INITIALIZER((n).work, (f)),		\
+	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+}
+
+#define OVS_DECLARE_DELAYED_WORK(n, f)				\
+	struct ovs_delayed_work n = __OVS_DELAYED_WORK_INITIALIZER(n, f)
+
+#define ovs_schedule_delayed_work rpl_ovs_schedule_delayed_work
+int ovs_schedule_delayed_work(struct ovs_delayed_work *dwork, unsigned long delay);
+
+#define ovs_cancel_delayed_work_sync rpl_ovs_cancel_delayed_work_sync
+int ovs_cancel_delayed_work_sync(struct ovs_delayed_work *dwork);
+
+#endif
diff --git a/datapath/tunnel.c b/datapath/tunnel.c
index d651c11..b7f0c1a 100644
--- a/datapath/tunnel.c
+++ b/datapath/tunnel.c
@@ -30,7 +30,6 @@
 #include <linux/list.h>
 #include <linux/kernel.h>
 #include <linux/version.h>
-#include <linux/workqueue.h>
 #include <linux/rculist.h>
 
 #include <net/dsfield.h>
@@ -46,6 +45,7 @@
 
 #include "checksum.h"
 #include "datapath.h"
+#include "ovs_workqueue.h"
 #include "tunnel.h"
 #include "vlan.h"
 #include "vport.h"
@@ -87,8 +87,8 @@
 static struct hlist_head *port_table __read_mostly;
 static int port_table_count;
 
-static void cache_cleaner(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
+static void cache_cleaner(struct ovs_work_struct *work);
+static OVS_DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
 
 /*
  * These are just used as an optimization: they don't require any kind of
@@ -137,7 +137,7 @@ static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
 
 static void schedule_cache_cleaner(void)
 {
-	schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
+	ovs_schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
 }
 
 static void free_cache(struct tnl_cache *cache)
@@ -273,7 +273,7 @@ static void port_table_remove_port(struct vport *vport)
 	port_table_count--;
 
 	if (port_table_count == 0)
-		cancel_delayed_work_sync(&cache_cleaner_wq);
+		ovs_cancel_delayed_work_sync(&cache_cleaner_wq);
 
 	(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
 }
@@ -858,7 +858,7 @@ static void __cache_cleaner(struct tnl_vport *tnl_vport)
 	}
 }
 
-static void cache_cleaner(struct work_struct *work)
+static void cache_cleaner(struct ovs_work_struct *work)
 {
 	int i;
 
diff --git a/datapath/linux/compat/workqueue.c b/datapath/workqueue.c
similarity index 67%
rename from datapath/linux/compat/workqueue.c
rename to datapath/workqueue.c
index 9934f1a..ed8d0a6 100644
--- a/datapath/linux/compat/workqueue.c
+++ b/datapath/workqueue.c
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/completion.h>
-#include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
@@ -23,13 +22,28 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#include "ovs_workqueue.h"
+
 static spinlock_t wq_lock;
 static struct list_head workq;
 static wait_queue_head_t more_work;
 static struct task_struct *workq_thread;
-static struct work_struct *current_work;
+static struct ovs_work_struct *current_work;
+
+#define OVS_INIT_WORK(_work, _func)					\
+	do {								\
+		(_work)->data = (atomic_long_t) OVS_WORK_DATA_INIT();	\
+		INIT_LIST_HEAD(&(_work)->entry);			\
+		(_work)->func = (_func);				\
+	} while (0)
+
+#define ovs_work_data_bits(work) ((unsigned long *)(&(work)->data))
+
+#define ovs_work_clear_pending(work)				\
+	clear_bit(OVS_WORK_STRUCT_PENDING, ovs_work_data_bits(work))
+
 
-static void queue_work(struct work_struct *work)
+static void ovs_queue_work(struct ovs_work_struct *work)
 {
 	unsigned long flags;
 
@@ -41,15 +55,15 @@ static void queue_work(struct work_struct *work)
 
 static void _delayed_work_timer_fn(unsigned long __data)
 {
-	struct delayed_work *dwork = (struct delayed_work *)__data;
-	queue_work(&dwork->work);
+	struct ovs_delayed_work *dwork = (struct ovs_delayed_work *)__data;
+	ovs_queue_work(&dwork->work);
 }
 
-static void __queue_delayed_work(struct delayed_work *dwork,
+static void __queue_delayed_work(struct ovs_delayed_work *dwork,
 				unsigned long delay)
 {
 	struct timer_list *timer = &dwork->timer;
-	struct work_struct *work = &dwork->work;
+	struct ovs_work_struct *work = &dwork->work;
 
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));
@@ -61,13 +75,13 @@ static void __queue_delayed_work(struct delayed_work *dwork,
 	add_timer(timer);
 }
 
-int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
+int ovs_schedule_delayed_work(struct ovs_delayed_work *dwork, unsigned long delay)
 {
-	if (test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(&dwork->work)))
+	if (test_and_set_bit(OVS_WORK_STRUCT_PENDING, ovs_work_data_bits(&dwork->work)))
 		return 0;
 
 	if (delay == 0)
-		queue_work(&dwork->work);
+		ovs_queue_work(&dwork->work);
 	else
 		__queue_delayed_work(dwork, delay);
 
@@ -75,17 +89,17 @@ int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 }
 
 struct wq_barrier {
-	struct work_struct work;
+	struct ovs_work_struct work;
 	struct completion done;
 };
 
-static void wq_barrier_func(struct work_struct *work)
+static void wq_barrier_func(struct ovs_work_struct *work)
 {
 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
 	complete(&barr->done);
 }
 
-static void workqueue_barrier(struct work_struct *work)
+static void workqueue_barrier(struct ovs_work_struct *work)
 {
 	bool need_barrier;
 	struct wq_barrier barr;
@@ -94,7 +108,7 @@ static void workqueue_barrier(struct work_struct *work)
 	if (current_work != work)
 		need_barrier = false;
 	else {
-		INIT_WORK(&barr.work, wq_barrier_func);
+		OVS_INIT_WORK(&barr.work, wq_barrier_func);
 		init_completion(&barr.done);
 		list_add(&barr.work.entry, &workq);
 		wake_up(&more_work);
@@ -106,13 +120,13 @@ static void workqueue_barrier(struct work_struct *work)
 		wait_for_completion(&barr.done);
 }
 
-static int try_to_grab_pending(struct work_struct *work)
+static int try_to_grab_pending(struct ovs_work_struct *work)
 {
 	int ret;
 
 	BUG_ON(in_interrupt());
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
+	if (!test_and_set_bit(OVS_WORK_STRUCT_PENDING, ovs_work_data_bits(work)))
 		return 0;
 
 	spin_lock_irq(&wq_lock);
@@ -127,7 +141,7 @@ static int try_to_grab_pending(struct work_struct *work)
 	return ret;
 }
 
-static int __cancel_work_timer(struct work_struct *work,
+static int __cancel_work_timer(struct ovs_work_struct *work,
 				struct timer_list *timer)
 {
 	int ret;
@@ -143,11 +157,11 @@ static int __cancel_work_timer(struct work_struct *work,
 			break;
 	}
 	workqueue_barrier(work);
-	work_clear_pending(work);
+	ovs_work_clear_pending(work);
 	return ret;
 }
 
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+int ovs_cancel_delayed_work_sync(struct ovs_delayed_work *dwork)
 {
 	return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
@@ -156,15 +170,15 @@ static void run_workqueue(void)
 {
 	spin_lock_irq(&wq_lock);
 	while (!list_empty(&workq)) {
-		struct work_struct *work = list_entry(workq.next,
-					struct work_struct, entry);
+		struct ovs_work_struct *work = list_entry(workq.next,
+					struct ovs_work_struct, entry);
 
-		work_func_t f = work->func;
+		ovs_work_func_t f = work->func;
 		list_del_init(workq.next);
 		current_work = work;
 		spin_unlock_irq(&wq_lock);
 
-		work_clear_pending(work);
+		ovs_work_clear_pending(work);
 		f(work);
 
 		BUG_ON(in_interrupt());
-- 
1.7.10

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev