Use static_key for XPS maps to reduce the cost of extra map checks,
similar to how it is used for RPS and RFS.

Signed-off-by: Amritha Nambiar <amritha.namb...@intel.com>
---
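For reviewers: the jump-label pattern referred to above works roughly as in
the sketch below. This is only an illustration; "feature_needed",
"feature_enable"/"feature_disable" and "feature_fast_path" are made-up names,
not part of this patch or the kernel.

    #include <linux/jump_label.h>

    /* Defaults to the disabled (false) state, like xps_needed in this patch. */
    static struct static_key feature_needed __read_mostly;

    /* Control path: called when the feature is configured/unconfigured. */
    static void feature_enable(void)
    {
            static_key_slow_inc(&feature_needed);   /* enable the branch */
    }

    static void feature_disable(void)
    {
            static_key_slow_dec(&feature_needed);   /* disabled again when the count drops to 0 */
    }

    /* Hot path: nearly free while the feature is unused. */
    static int feature_fast_path(void)
    {
            if (!static_key_false(&feature_needed))
                    return -1;      /* key disabled: skip the expensive lookup */

            /* ... per-packet map lookup only runs once someone enabled it ... */
            return 0;
    }

The hot-path test is patched at runtime, so systems that never configure the
feature do not pay for the extra map checks on every transmitted packet.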
 net/core/dev.c |    8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/net/core/dev.c b/net/core/dev.c
index 156acbe..bba755f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2081,6 +2081,8 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
 EXPORT_SYMBOL(netdev_txq_to_tc);
 
 #ifdef CONFIG_XPS
+struct static_key xps_needed __read_mostly;
+EXPORT_SYMBOL(xps_needed);
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)            \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -2189,6 +2191,7 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 out_no_maps:
                type++;
        }
+       static_key_slow_dec(&xps_needed);
        mutex_unlock(&xps_map_mutex);
 }
 
@@ -2309,6 +2312,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        if (!new_dev_maps)
                goto out_no_new_maps;
 
+       static_key_slow_inc(&xps_needed);
+
        for (j = -1; j = attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
                /* copy maps belonging to foreign traffic classes */
@@ -3481,6 +3486,9 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
        struct xps_map *map;
        int queue_index = -1;
 
+       if (!static_key_false(&xps_needed))
+               return -1;
+
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_cpus_map);
        if (dev_maps) {
