From: Magnus Karlsson <magnus.karls...@intel.com>

Move the driver-bypass transmit logic out of packet_direct_xmit() in
af_packet and into a new dev_direct_xmit() helper in net/core/dev.c,
making packet_direct_xmit() a thin wrapper around it. The new
dev_direct_xmit will be used by AF_XDP in later commits.

Signed-off-by: Magnus Karlsson <magnus.karls...@intel.com>
---
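For reviewers, a rough sketch of how a caller might drive the new helper
(illustration only; the example_xmit_on_queue() wrapper and its -EAGAIN
handling are made up and not taken from this patch or from the upcoming
AF_XDP code):

/*
 * Illustration only: hand an already-built skb straight to TX queue
 * `qid` of its device, bypassing the qdisc layer.  dev_direct_xmit()
 * consumes (frees) the skb on every failure path, so the caller must
 * not touch it again after the call.
 */
static int example_xmit_on_queue(struct net_device *dev,
				 struct sk_buff *skb, u16 qid)
{
	int err;

	skb->dev = dev;		/* dev_direct_xmit() takes the device from skb->dev */

	err = dev_direct_xmit(skb, qid);
	if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY)
		return -EAGAIN;	/* skb was already freed by dev_direct_xmit() */

	return 0;
}
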
 include/linux/netdevice.h |  1 +
 net/core/dev.c            | 38 ++++++++++++++++++++++++++++++++++++++
 net/packet/af_packet.c    | 42 +++++-------------------------------------
 3 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 366c32891158..a30435118530 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2486,6 +2486,7 @@ void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
diff --git a/net/core/dev.c b/net/core/dev.c
index aea36b5a2fed..d3fdc86516e8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3625,6 +3625,44 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
+int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+       struct net_device *dev = skb->dev;
+       struct sk_buff *orig_skb = skb;
+       struct netdev_queue *txq;
+       int ret = NETDEV_TX_BUSY;
+       bool again = false;
+
+       if (unlikely(!netif_running(dev) ||
+                    !netif_carrier_ok(dev)))
+               goto drop;
+
+       skb = validate_xmit_skb_list(skb, dev, &again);
+       if (skb != orig_skb)
+               goto drop;
+
+       skb_set_queue_mapping(skb, queue_id);
+       txq = skb_get_tx_queue(dev, skb);
+
+       local_bh_disable();
+
+       HARD_TX_LOCK(dev, txq, smp_processor_id());
+       if (!netif_xmit_frozen_or_drv_stopped(txq))
+               ret = netdev_start_xmit(skb, dev, txq, false);
+       HARD_TX_UNLOCK(dev, txq);
+
+       local_bh_enable();
+
+       if (!dev_xmit_complete(ret))
+               kfree_skb(skb);
+
+       return ret;
+drop:
+       atomic_long_inc(&dev->tx_dropped);
+       kfree_skb_list(skb);
+       return NET_XMIT_DROP;
+}
+EXPORT_SYMBOL(dev_direct_xmit);
 
 /*************************************************************************
  *                     Receiver routines
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 01f3515cada0..611a26d5235c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -209,7 +209,7 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
-static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
+static u16 packet_pick_tx_queue(struct sk_buff *skb);
 
 struct packet_skb_cb {
        union {
@@ -243,40 +243,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
 static int packet_direct_xmit(struct sk_buff *skb)
 {
-       struct net_device *dev = skb->dev;
-       struct sk_buff *orig_skb = skb;
-       struct netdev_queue *txq;
-       int ret = NETDEV_TX_BUSY;
-       bool again = false;
-
-       if (unlikely(!netif_running(dev) ||
-                    !netif_carrier_ok(dev)))
-               goto drop;
-
-       skb = validate_xmit_skb_list(skb, dev, &again);
-       if (skb != orig_skb)
-               goto drop;
-
-       packet_pick_tx_queue(dev, skb);
-       txq = skb_get_tx_queue(dev, skb);
-
-       local_bh_disable();
-
-       HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_xmit_frozen_or_drv_stopped(txq))
-               ret = netdev_start_xmit(skb, dev, txq, false);
-       HARD_TX_UNLOCK(dev, txq);
-
-       local_bh_enable();
-
-       if (!dev_xmit_complete(ret))
-               kfree_skb(skb);
-
-       return ret;
-drop:
-       atomic_long_inc(&dev->tx_dropped);
-       kfree_skb_list(skb);
-       return NET_XMIT_DROP;
+       return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 }
 
 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
@@ -313,8 +280,9 @@ static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
        return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 }
 
-static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 packet_pick_tx_queue(struct sk_buff *skb)
 {
+       struct net_device *dev = skb->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        u16 queue_index;
 
@@ -326,7 +294,7 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
                queue_index = __packet_pick_tx_queue(dev, skb);
        }
 
-       skb_set_queue_mapping(skb, queue_index);
+       return queue_index;
 }
 
 /* __register_prot_hook must be invoked through register_prot_hook
-- 
2.14.1
