On Mon, Jun 30, 2014 at 6:01 PM, Jesse Gross <je...@nicira.com> wrote:
> The upstream u64_stats API has been changed to remove the _bh()
> versions and switch all consumers to use IRQ safe variants instead.
> This was done to be safe for netpoll generated packets, which can
> occur in hard IRQ context. From a safety perspective, this doesn't
> directly affect OVS since it doesn't support netpoll. However, this
> change has been backported to older kernels so OVS needs to use the
> new API to compile.
>
> Signed-off-by: Jesse Gross <je...@nicira.com>
> ---
>  acinclude.m4                                     |  2 ++
>  datapath/datapath.c                              |  4 +--
>  .../linux/compat/include/linux/u64_stats_sync.h  | 37 ++++++++++------------
>  datapath/vport.c                                 |  4 +--
>  4 files changed, 23 insertions(+), 24 deletions(-)
>
> diff --git a/acinclude.m4 b/acinclude.m4
> index c761366..aa9ffcd 100644
> --- a/acinclude.m4
> +++ b/acinclude.m4
> @@ -348,6 +348,8 @@ AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
>
>    OVS_GREP_IFELSE([$KSRC/include/linux/percpu.h], [this_cpu_ptr])
>
> +  OVS_GREP_IFELSE([$KSRC/include/linux/u64_stats_sync.h], [u64_stats_fetch_begin_irq])
> +
>    OVS_GREP_IFELSE([$KSRC/include/linux/openvswitch.h], [openvswitch_handle_frame_hook],
>                    [OVS_DEFINE([HAVE_RHEL_OVS_HOOK])])
>
> diff --git a/datapath/datapath.c b/datapath/datapath.c
> index e504fee..a7a6c3c 100644
> --- a/datapath/datapath.c
> +++ b/datapath/datapath.c
> @@ -673,9 +673,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
>  		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
>
>  		do {
> -			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
> +			start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
>  			local_stats = *percpu_stats;
> -		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
> +		} while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
>
>  		stats->n_hit += local_stats.n_hit;
>  		stats->n_missed += local_stats.n_missed;
> diff --git a/datapath/linux/compat/include/linux/u64_stats_sync.h b/datapath/linux/compat/include/linux/u64_stats_sync.h
> index 234fd91..9342f73 100644
> --- a/datapath/linux/compat/include/linux/u64_stats_sync.h
> +++ b/datapath/linux/compat/include/linux/u64_stats_sync.h
> @@ -3,7 +3,8 @@
>
>  #include <linux/version.h>
>
> -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
> +#if defined(HAVE_U64_STATS_FETCH_BEGIN_IRQ) && \
> +    LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
>  #include_next <linux/u64_stats_sync.h>
>  #else
>

Why not just check for HAVE_U64_STATS_FETCH_BEGIN_IRQ?
> @@ -33,8 +34,8 @@
>   * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
>   * read partial values)
>   *
> - * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
> - * u64_stats_fetch_retry_bh() helpers
> + * 7) For irq or softirq uses, readers can use u64_stats_fetch_begin_irq() and
> + * u64_stats_fetch_retry_irq() helpers
>   *
>   * Usage :
>   *
> @@ -73,6 +74,12 @@ struct u64_stats_sync {
>  #endif
>  };
>
> +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
> +# define u64_stats_init(syncp)	seqcount_init(syncp.seq)
> +#else
> +# define u64_stats_init(syncp)	do { } while (0)
> +#endif
> +
>  static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
>  {
>  #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
> @@ -113,46 +120,36 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
>  }
>
>  /*
> - * In case softirq handlers can update u64 counters, readers can use following helpers
> + * In case irq handlers can update u64 counters, readers can use following helpers
>   * - SMP 32bit arches use seqcount protection, irq safe.
> - * - UP 32bit must disable BH.
> + * - UP 32bit must disable irqs.
>   * - 64bit have no problem atomically reading u64 values, irq safe.
>   */
> -static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
> +static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
>  {
>  #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
>  	return read_seqcount_begin(&syncp->seq);
>  #else
>  #if BITS_PER_LONG==32
> -	local_bh_disable();
> +	local_irq_disable();
>  #endif
>  	return 0;
>  #endif
>  }
>
> -static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
> +static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
>  					     unsigned int start)
>  {
>  #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
>  	return read_seqcount_retry(&syncp->seq, start);
>  #else
>  #if BITS_PER_LONG==32
> -	local_bh_enable();
> +	local_irq_enable();
>  #endif
>  	return false;
>  #endif
>  }
>
> -#endif /* Linux kernel < 2.6.36 */
> -
> -#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
> -
> -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
> -# define u64_stats_init(syncp)	seqcount_init(syncp.seq)
> -#else
> -# define u64_stats_init(syncp)	do { } while (0)
> -#endif
> -
> -#endif
> +#endif /* !HAVE_U64_STATS_FETCH_BEGIN_IRQ || kernel < 3.13 */
>
>  #endif /* _LINUX_U64_STATS_SYNC_WRAPPER_H */
> diff --git a/datapath/vport.c b/datapath/vport.c
> index 897f221..42ee726 100644
> --- a/datapath/vport.c
> +++ b/datapath/vport.c
> @@ -311,9 +311,9 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
>  		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
>
>  		do {
> -			start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
> +			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
>  			local_stats = *percpu_stats;
> -		} while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
> +		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
>
>  		stats->rx_bytes += local_stats.rx_bytes;
>  		stats->rx_packets += local_stats.rx_packets;

Otherwise looks good.

Acked-by: Pravin B Shelar <pshe...@nicira.com>

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev
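For anyone skimming the thread who hasn't used this API: the reader-side pattern that both converted call sites follow looks roughly like the sketch below. The struct and function names here are made up for illustration and are not from the OVS tree; only the u64_stats helpers themselves are the ones the compat header above provides.

```c
/*
 * Illustrative sketch only (not part of the patch): summing a per-CPU
 * 64-bit counter with the IRQ-safe fetch helpers this patch converts to.
 * "example_pcpu_stats" and "example_sum_packets" are hypothetical names.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

static u64 example_sum_packets(struct example_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			/* Retry the copy if a writer (possibly in hard IRQ) raced with us. */
			start = u64_stats_fetch_begin_irq(&s->syncp);
			packets = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += packets;
	}

	return total;
}
```

On 64-bit, and on 32-bit SMP where seqcount protection is used, the fetch helpers are cheap; only 32-bit UP actually disables interrupts around the copy, which is the fallback the compat header above implements.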