Reuse the old and forgotten SKB_ALLOC_NAPI flag to add an option to get
an skbuff_head from the NAPI cache instead of an in-place allocation
inside __alloc_skb().
This implies that the function is called from softirq or BH-off
context, not for allocating a clone or from a distant node.

Signed-off-by: Alexander Lobakin <aloba...@pm.me>
---
 net/core/skbuff.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9e1a8ded4acc..750fa1825b28 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -397,15 +397,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;
+       bool clone;
 
-       cache = (flags & SKB_ALLOC_FCLONE)
-               ? skbuff_fclone_cache : skbuff_head_cache;
+       clone = !!(flags & SKB_ALLOC_FCLONE);
+       cache = clone ? skbuff_fclone_cache : skbuff_head_cache;
 
        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;
 
        /* Get the HEAD */
-       skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
+       if (!clone && (flags & SKB_ALLOC_NAPI) &&
+           likely(node == NUMA_NO_NODE || node == numa_mem_id()))
+               skb = napi_skb_cache_get();
+       else
+               skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
        if (unlikely(!skb))
                return NULL;
        prefetchw(skb);
@@ -436,7 +441,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        __build_skb_around(skb, data, 0);
        skb->pfmemalloc = pfmemalloc;
 
-       if (flags & SKB_ALLOC_FCLONE) {
+       if (clone) {
                struct sk_buff_fclones *fclones;
 
                fclones = container_of(skb, struct sk_buff_fclones, skb1);
-- 
2.30.1


Reply via email to