It makes no sense to have a second &net_device_ops struct (800 bytes of
rodata) with only one difference in .ndo_start_xmit, which can easily
be just one `if`. This `if` is a drop in the ocean on the hotpath and you
won't see any measurable performance difference.
Define unified idpf_xmit_start(). The preparation for sending is the
same, just call either idpf_tx_splitq_frame() or idpf_tx_singleq_frame()
depending on the active model to actually map and send the skb.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.h   |  7 ++---
 drivers/net/ethernet/intel/idpf/idpf_lib.c    | 26 +++-------------
 .../ethernet/intel/idpf/idpf_singleq_txrx.c   | 31 ++-----------------
 drivers/net/ethernet/intel/idpf/idpf_txrx.c   | 10 +++---
 4 files changed, 15 insertions(+), 59 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0192d33744ff..015aba5abb3c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -1190,10 +1190,9 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned 
int max_bufs,
                        unsigned int count);
 int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
-                                struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
-                                 struct net_device *netdev);
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+                                 struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
                                      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c 
b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index a8be09a89943..fe91475c7b4c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,8 +4,7 @@
 #include "idpf.h"
 #include "idpf_virtchnl.h"
 
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
 
 /**
  * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -764,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
        }
 
        /* assign netdev_ops */
-       if (idpf_is_queue_model_split(vport->txq_model))
-               netdev->netdev_ops = &idpf_netdev_ops_splitq;
-       else
-               netdev->netdev_ops = &idpf_netdev_ops_singleq;
+       netdev->netdev_ops = &idpf_netdev_ops;
 
        /* setup watchdog timeout value to be 5 second */
        netdev->watchdog_timeo = 5 * HZ;
@@ -2353,24 +2349,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct 
idpf_dma_mem *mem)
        mem->pa = 0;
 }
 
-static const struct net_device_ops idpf_netdev_ops_splitq = {
-       .ndo_open = idpf_open,
-       .ndo_stop = idpf_stop,
-       .ndo_start_xmit = idpf_tx_splitq_start,
-       .ndo_features_check = idpf_features_check,
-       .ndo_set_rx_mode = idpf_set_rx_mode,
-       .ndo_validate_addr = eth_validate_addr,
-       .ndo_set_mac_address = idpf_set_mac,
-       .ndo_change_mtu = idpf_change_mtu,
-       .ndo_get_stats64 = idpf_get_stats64,
-       .ndo_set_features = idpf_set_features,
-       .ndo_tx_timeout = idpf_tx_timeout,
-};
-
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
        .ndo_open = idpf_open,
        .ndo_stop = idpf_stop,
-       .ndo_start_xmit = idpf_tx_singleq_start,
+       .ndo_start_xmit = idpf_tx_start,
        .ndo_features_check = idpf_features_check,
        .ndo_set_rx_mode = idpf_set_rx_mode,
        .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c 
b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index b51f7cd6db01..a3b60a2dfcaa 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -351,8 +351,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct 
idpf_tx_queue *txq,
  *
  * Returns NETDEV_TX_OK if sent, else an error code
  */
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
-                                        struct idpf_tx_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+                                 struct idpf_tx_queue *tx_q)
 {
        struct idpf_tx_offload_params offload = { };
        struct idpf_tx_buf *first;
@@ -408,33 +408,6 @@ static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff 
*skb,
        return idpf_tx_drop_skb(tx_q, skb);
 }
 
-/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
-                                 struct net_device *netdev)
-{
-       struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
-       struct idpf_tx_queue *tx_q;
-
-       tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
-       /* hardware can't handle really short frames, hardware padding works
-        * beyond this point
-        */
-       if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
-               idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-
-               return NETDEV_TX_OK;
-       }
-
-       return idpf_tx_singleq_frame(skb, tx_q);
-}
-
 /**
  * idpf_tx_singleq_clean - Reclaim resources from queue
  * @tx_q: Tx queue to clean
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 500754795cc8..4aa5ee781bd7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2849,14 +2849,13 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff 
*skb,
 }
 
 /**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
  * @skb: send buffer
  * @netdev: network interface device structure
  *
  * Returns NETDEV_TX_OK if sent, else an error code
  */
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
-                                struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
 {
        struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
        struct idpf_tx_queue *tx_q;
@@ -2878,7 +2877,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       return idpf_tx_splitq_frame(skb, tx_q);
+       if (idpf_is_queue_model_split(vport->txq_model))
+               return idpf_tx_splitq_frame(skb, tx_q);
+       else
+               return idpf_tx_singleq_frame(skb, tx_q);
 }
 
 /**
-- 
2.45.0

Reply via email to