Drop the TX packet instead of returning NETDEV_TX_BUSY when posting
the work request fails, and call mana_unmap_skb() on the error path
so the skb's DMA mappings are always cleaned up rather than leaked.

Also remove the "unsuccessful flow control!" dev_err() from
mana_gd_post_work_request(): a full work queue is an expected,
recoverable condition that the -ENOSPC return value already reports
to the caller.

Signed-off-by: Aditya Garg <[email protected]>
Reviewed-by: Haiyang Zhang <[email protected]>
---
Changes in v6:
* No change.

Changes in v5:
* No change.

Changes in v4:
* Fix warning during build reported by kernel test robot
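
For reviewers, a minimal sketch of the resulting error path in
mana_start_xmit() after this patch. Only the statements inside the
hunks below are verbatim; the call site and the cleanup labels past
free_sgl_ptr paraphrase the existing function and are illustrative:

	err = mana_gd_post_work_request(...);	/* sketch: args elided */
	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		mana_unmap_skb(skb, apc);	/* undo DMA mappings before dropping */
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		goto free_sgl_ptr;		/* drop instead of NETDEV_TX_BUSY */
	}
	...
free_sgl_ptr:
	kfree(pkg.sgl_ptr);		/* release the scatter-gather list */
	ndev->stats.tx_dropped++;	/* account the dropped packet */
	dev_kfree_skb_any(skb);		/* free the skb; no retry from the stack */
	return NETDEV_TX_OK;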
---
 drivers/net/ethernet/microsoft/mana/gdma_main.c | 6 +-----
 drivers/net/ethernet/microsoft/mana/mana_en.c   | 7 +++----
 include/net/mana/mana.h                         | 1 +
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207a..8fd70b34807a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
                              struct gdma_posted_wqe_info *wqe_info)
 {
        u32 client_oob_size = wqe_req->inline_oob_size;
-       struct gdma_context *gc;
        u32 sgl_data_size;
        u32 max_wqe_size;
        u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
        if (wqe_size > max_wqe_size)
                return -EINVAL;
 
-       if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
-               gc = wq->gdma_dev->gdma_context;
-               dev_err(gc->dev, "unsuccessful flow control!\n");
+       if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
                return -ENOSPC;
-       }
 
        if (wqe_info)
                wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 7b49ab005e2d..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -492,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        if (err) {
                (void)skb_dequeue_tail(&txq->pending_skbs);
+               mana_unmap_skb(skb, apc);
                netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-               err = NETDEV_TX_BUSY;
-               goto tx_busy;
+               goto free_sgl_ptr;
        }
 
        err = NETDEV_TX_OK;
@@ -514,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
        u64_stats_update_end(&tx_stats->syncp);
 
-tx_busy:
        if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
                netif_tx_wake_queue(net_txq);
                apc->eth_stats.wake_queue++;
@@ -1687,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
        return 0;
 }
 
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
        struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
        struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index fb28b3cac067..d7e089c6b694 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -593,6 +593,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 void mana_query_phy_stats(struct mana_port_context *apc);
 int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
 void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
 
 extern const struct ethtool_ops mana_ethtool_ops;
 extern struct dentry *mana_debugfs_root;
-- 
2.43.0

