From: Maxim Mikityanskiy <maxi...@mellanox.com>

Before this patch, mlx5e_ktls_tx_handle_resync_dump_comp checked for
resync_dump_frag_page. This check ran for every WQE without an SKB,
including padding WQEs, and cost a function call. Padding WQEs normally
occur far more often than TLS resyncs. Move the check out of the
function and into an inline wrapper to save a function call on every
padding WQE.

Signed-off-by: Maxim Mikityanskiy <maxi...@mellanox.com>
Signed-off-by: Saeed Mahameed <sae...@nvidia.com>
---
 .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c |  3 ---
 .../mellanox/mlx5/core/en_accel/ktls_txrx.h        | 14 +++++++++++---
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c    |  4 ++--
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f4861545b236..b140e13fdcc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -345,9 +345,6 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
        struct mlx5e_sq_stats *stats;
        struct mlx5e_sq_dma *dma;
 
-       if (!wi->resync_dump_frag_page)
-               return;
-
        dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
        stats = sq->stats;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ff4c740af10b..fcfb156cf09d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,11 +29,19 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           u32 *dma_fifo_cc);
+static inline void
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                         struct mlx5e_tx_wqe_info *wi,
+                                         u32 *dma_fifo_cc)
+{
+       if (unlikely(wi->resync_dump_frag_page))
+               mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma_fifo_cc);
+}
 #else
 static inline void
-mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
-                                     struct mlx5e_tx_wqe_info *wi,
-                                     u32 *dma_fifo_cc)
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                         struct mlx5e_tx_wqe_info *wi,
+                                         u32 *dma_fifo_cc)
 {
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 46bdbbbfaf65..869b3313dabf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -530,7 +530,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        sqcc += wi->num_wqebbs;
 
                        if (unlikely(!skb)) {
-                               mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+                               mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                                continue;
                        }
 
@@ -595,7 +595,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
                sqcc += wi->num_wqebbs;
 
                if (!skb) {
-                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+                       mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                        continue;
                }
 
-- 
2.26.2
