Abstract the code that steers a flow to an aRFS queue (via
ndo_rx_flow_steer) out of set_rps_cpu() into its own function,
set_arfs_queue(). This allows the logic to be reused from other call
sites. No functional change intended.
---
 net/core/dev.c | 67 +++++++++++++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 28 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 6bc2388141f6..9f7a3e78e23a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4250,42 +4250,53 @@ EXPORT_SYMBOL(rps_needed);
 struct static_key_false rfs_needed __read_mostly;
 EXPORT_SYMBOL(rfs_needed);
 
+#ifdef CONFIG_RFS_ACCEL
+static void set_arfs_queue(struct net_device *dev, struct sk_buff *skb,
+                          struct rps_dev_flow *rflow, u16 rxq_index)
+{ /* Steer @skb's flow to hardware RX queue @rxq_index via ndo_rx_flow_steer. */
+       struct rps_dev_flow_table *flow_table;
+       struct netdev_rx_queue *rxqueue;
+       struct rps_dev_flow *old_rflow;
+       u32 flow_id;
+       int rc;
+
+       rxqueue = dev->_rx + rxq_index; /* target RX queue */
+
+       flow_table = rcu_dereference(rxqueue->rps_flow_table); /* caller must be in an RCU read-side section */
+       if (!flow_table)
+               return; /* no aRFS flow table configured for this queue */
+
+       flow_id = skb_get_hash(skb) & flow_table->mask; /* index into per-queue flow table */
+       rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+                                               rxq_index, flow_id); /* driver programs a HW filter; returns filter id or negative errno */
+       if (rc < 0)
+               return; /* driver could not steer; leave existing state untouched */
+
+       old_rflow = rflow;
+       rflow = &flow_table->flows[flow_id];
+       rflow->filter = rc; /* record the new hardware filter id on the target entry */
+       if (old_rflow->filter == rflow->filter)
+               old_rflow->filter = RPS_NO_FILTER; /* invalidate duplicate filter reference in the old entry */
+}
+#endif
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
 {
        if (next_cpu < nr_cpu_ids) {
 #ifdef CONFIG_RFS_ACCEL
-               struct netdev_rx_queue *rxqueue;
-               struct rps_dev_flow_table *flow_table;
-               struct rps_dev_flow *old_rflow;
-               u32 flow_id;
-               u16 rxq_index;
-               int rc;
 
                /* Should we steer this flow to a different hardware queue? */
-               if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
-                   !(dev->features & NETIF_F_NTUPLE))
-                       goto out;
-               rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
-               if (rxq_index == skb_get_rx_queue(skb))
-                       goto out;
-
-               rxqueue = dev->_rx + rxq_index;
-               flow_table = rcu_dereference(rxqueue->rps_flow_table);
-               if (!flow_table)
-                       goto out;
-               flow_id = skb_get_hash(skb) & flow_table->mask;
-               rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
-                                                       rxq_index, flow_id);
-               if (rc < 0)
-                       goto out;
-               old_rflow = rflow;
-               rflow = &flow_table->flows[flow_id];
-               rflow->filter = rc;
-               if (old_rflow->filter == rflow->filter)
-                       old_rflow->filter = RPS_NO_FILTER;
-       out:
+               if (skb_rx_queue_recorded(skb) && dev->rx_cpu_rmap &&
+                   (dev->features & NETIF_F_NTUPLE)) {
+                       u16 rxq_index;
+
+                       rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap,
+                                                         next_cpu);
+                       if (rxq_index != skb_get_rx_queue(skb))
+                               set_arfs_queue(dev, skb, rflow, rxq_index);
+               }
 #endif
                rflow->last_qtail =
                        per_cpu(softnet_data, next_cpu).input_queue_head;
-- 
2.25.1

Reply via email to