From: Tariq Toukan <tar...@mellanox.com>

Make the xdp_xmit indication available for Striding RQ
by taking it out of the type-specific union.
This refactor prepares for a downstream patch that
adds XDP support over Striding RQ.
In addition, use a bitmap instead of a boolean for possible
future flags.

Signed-off-by: Tariq Toukan <tar...@mellanox.com>
Signed-off-by: Saeed Mahameed <sae...@mellanox.com>
---
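A minimal sketch of the flag pattern introduced here, for reviewers.
Everything except MLX5E_RQ_FLAG_XDP_XMIT and the flags bitmap itself
(the demo_* names and the standalone struct) is hypothetical, for
illustration only:

#include <linux/bitops.h>       /* __set_bit(), __test_and_clear_bit() */
#include <linux/types.h>        /* DECLARE_BITMAP() */

enum mlx5e_rq_flag {
        MLX5E_RQ_FLAG_XDP_XMIT = 0,     /* bit index into the flags bitmap */
};

struct demo_rq {
        DECLARE_BITMAP(flags, 8);
};

/* Producer (XDP_TX datapath): each RQ is serviced by a single NAPI
 * context, so the non-atomic __set_bit() is sufficient.
 */
static void demo_mark_xdp_xmit(struct demo_rq *rq)
{
        __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
}

/* Consumer (RX completion): test and clear in one call, replacing the
 * old "check the bool, then reset it" two-step.
 */
static bool demo_take_xdp_xmit(struct demo_rq *rq)
{
        return __test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
}

Like the old bool, DECLARE_BITMAP(flags, 8) occupies a single unsigned
long, but new flags can now be added without another layout change.
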
 drivers/net/ethernet/mellanox/mlx5/core/en.h    | 10 ++++++----
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c |  8 +++-----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d26dd4bc89f4..a6ca54393bb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -479,6 +479,10 @@ typedef struct sk_buff *
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 
+enum mlx5e_rq_flag {
+       MLX5E_RQ_FLAG_XDP_XMIT = 0, /* bit index, not a mask */
+};
+
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
@@ -489,7 +493,4 @@ struct mlx5e_rq {
                        u32 frag_sz;    /* max possible skb frag_sz */
-                       union {
-                               bool page_reuse;
-                               bool xdp_xmit;
-                       };
+                       bool page_reuse;
                } wqe;
                struct {
@@ -528,6 +529,7 @@ struct mlx5e_rq {
        struct bpf_prog       *xdp_prog;
        unsigned int           hw_mtu;
        struct mlx5e_xdpsq     xdpsq;
+       DECLARE_BITMAP(flags, 8); /* bits from enum mlx5e_rq_flag */
 
        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 07db8a58d0a2..a827571deb85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -788,7 +788,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
        /* move page to reference to sq responsibility,
         * and mark so it's not put back in page-cache.
         */
-       rq->wqe.xdp_xmit = true;
+       __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
        sq->db.di[pi] = *di;
        sq->pc++;
 
@@ -913,9 +913,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (rq->wqe.xdp_xmit) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        wi->di.page = NULL;
-                       rq->wqe.xdp_xmit = false;
                        /* do not return page to cache, it will be returned on XDP_TX completion */
                        goto wq_ll_pop;
                }
@@ -955,9 +954,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
        skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
-               if (rq->wqe.xdp_xmit) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        wi->di.page = NULL;
-                       rq->wqe.xdp_xmit = false;
                        /* do not return page to cache, it will be returned on XDP_TX completion */
                        goto wq_ll_pop;
                }
-- 
2.14.3
