We can avoid an indirect call per packet by wrapping the skb creation
with the appropriate helper.

Signed-off-by: Paolo Abeni <pab...@redhat.com>
---
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   | 22 ++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 13133e7f088e..0fe5f13d07cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,6 +34,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/indirect_call_wrapper.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
 #include <net/inet_ecn.h>
@@ -1092,7 +1093,10 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct 
mlx5_cqe64 *cqe)
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-       skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+       skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+                             mlx5e_skb_from_cqe_linear,
+                             mlx5e_skb_from_cqe_nonlinear,
+                             rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1279,8 +1283,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, 
struct mlx5_cqe64 *cqe)
 
        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 
-       skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
-                                          page_idx);
+       skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+                             mlx5e_skb_from_cqe_mpwrq_linear,
+                             mlx5e_skb_from_cqe_mpwrq_nonlinear,
+                             rq, wi, cqe_bcnt, head_offset, page_idx);
        if (!skb)
                goto mpwrq_cqe_out;
 
@@ -1437,7 +1443,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct 
mlx5_cqe64 *cqe)
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-       skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+       skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+                             mlx5e_skb_from_cqe_linear,
+                             mlx5e_skb_from_cqe_nonlinear,
+                             rq, cqe, wi, cqe_bcnt);
        if (!skb)
                goto wq_free_wqe;
 
@@ -1469,7 +1478,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, 
struct mlx5_cqe64 *cqe)
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-       skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+       skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+                             mlx5e_skb_from_cqe_linear,
+                             mlx5e_skb_from_cqe_nonlinear,
+                             rq, cqe, wi, cqe_bcnt);
        if (unlikely(!skb)) {
                /* a DROP, save the page-reuse checks */
                mlx5e_free_rx_wqe(rq, wi, true);
-- 
2.20.1

Reply via email to