NIC device drivers are expected to use this small helper API when
building up an array of objects/skbs to bulk free, while (loop)
processing the objects to free.  Objects to be freed later are added
(dev_free_waitlist_add) to an array, which is flushed when it runs
full.  After the loop, any remaining objects are flushed
(dev_free_waitlist_flush).  The array should be stored on the local
stack.

Usage example: in its TX completion loop, a NIC driver can replace
dev_consume_skb_any() with an "add" and follow the loop with a
"flush", as sketched below.
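
A minimal sketch (hypothetical driver code, not part of this patch;
the ring type, the drv_tx_*() helpers and the BULK_FREE_MAX batch
size are made up for illustration):

  #define BULK_FREE_MAX 32        /* example driver-chosen batch size */

  static void drv_tx_clean(struct drv_tx_ring *ring)
  {
          struct sk_buff *skbs[BULK_FREE_MAX];    /* waitlist on the stack */
          struct dev_free_waitlist wl = { .skbs = skbs, .skb_cnt = 0 };

          while (drv_tx_desc_done(ring)) {        /* hypothetical helper */
                  struct sk_buff *skb = drv_tx_get_completed_skb(ring);

                  /* was: dev_consume_skb_any(skb); */
                  dev_free_waitlist_add(&wl, skb, BULK_FREE_MAX);
          }
          /* free any skbs still waiting on the list */
          dev_free_waitlist_flush(&wl);
  }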

For performance reasons, the compiler should inline most of these
functions.

Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
---
 include/linux/netdevice.h |   62 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05b9a694e213..d0133e778314 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2935,6 +2935,68 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
        __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 }
 
+/* NIC device drivers are expected to use this small helper API when
+ * building up an array of objects/skbs to bulk free, while (loop)
+ * processing the objects to free.  Objects to be freed later are
+ * added (dev_free_waitlist_add) to an array, which is flushed when
+ * it runs full.  After the loop, any remaining objects are flushed
+ * (dev_free_waitlist_flush).  The array should be on the local stack.
+ *
+ * Usage example: in its TX completion loop, a NIC driver can replace
+ * dev_consume_skb_any() with an "add" and follow the loop with a "flush".
+ *
+ * For performance reasons, the compiler should inline most of these
+ * functions.
+ */
+struct dev_free_waitlist {
+       struct sk_buff **skbs;
+       unsigned int skb_cnt;
+};
+
+static inline void __dev_free_waitlist_bulkfree(struct dev_free_waitlist *wl)
+{
+       /* Cannot bulk free from interrupt context or with IRQs
+        * disabled, due to how the SLAB bulk API works (and gains
+        * its speedup).  This can happen e.g. when invoked from
+        * netconsole/netpoll.
+        */
+       if (unlikely(in_irq() || irqs_disabled())) {
+               int i;
+
+               for (i = 0; i < wl->skb_cnt; i++)
+                       dev_consume_skb_irq(wl->skbs[i]);
+       } else {
+               /* Likely fastpath, don't call with cnt == 0 */
+               kfree_skb_bulk(wl->skbs, wl->skb_cnt);
+       }
+}
+
+static inline void dev_free_waitlist_flush(struct dev_free_waitlist *wl)
+{
+       /* Flush the waitlist, but only if any objects remain, as
+        * bulk freeing zero objects is not supported, and skipping
+        * the call avoids pointless function call overhead.
+        */
+       if (likely(wl->skb_cnt))
+               __dev_free_waitlist_bulkfree(wl);
+}
+
+static __always_inline void dev_free_waitlist_add(struct dev_free_waitlist *wl,
+                                                 struct sk_buff *skb,
+                                                 unsigned int max)
+{
+       /* It is recommended that max be a builtin constant, as this
+        * saves one register when inlined.  Catch offenders with:
+        * BUILD_BUG_ON(!__builtin_constant_p(max));
+        */
+       wl->skbs[wl->skb_cnt++] = skb;
+       if (wl->skb_cnt == max) {
+               /* Detect when waitlist array is full, then flush and reset */
+               __dev_free_waitlist_bulkfree(wl);
+               wl->skb_cnt = 0;
+       }
+}
+
 int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
