In C, we have structures and unions.
Casting `void *` via macros is not only error-prone, but also looks
confusing and awful in general.
Replace it with a union and direct array dereferences. If idpf had
separate queue structures, this would look way more elegant -- that
will be done one day.

Signed-off-by: Alexander Lobakin <aleksander.loba...@intel.com>
---
 .../ethernet/intel/idpf/idpf_singleq_txrx.c   | 20 +++++-----
 drivers/net/ethernet/intel/idpf/idpf_txrx.c   | 30 +++++++--------
 drivers/net/ethernet/intel/idpf/idpf_txrx.h   | 37 ++++++++-----------
 3 files changed, 40 insertions(+), 47 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c 
b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 23dcc02e6976..7072d45f007b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -206,7 +206,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
        data_len = skb->data_len;
        size = skb_headlen(skb);
 
-       tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+       tx_desc = &tx_q->base_tx[i];
 
        dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -242,7 +242,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
                        i++;
 
                        if (i == tx_q->desc_count) {
-                               tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                               tx_desc = &tx_q->base_tx[0];
                                i = 0;
                        }
 
@@ -262,7 +262,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
                i++;
 
                if (i == tx_q->desc_count) {
-                       tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                       tx_desc = &tx_q->base_tx[0];
                        i = 0;
                }
 
@@ -311,7 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
        memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
        txq->tx_buf[ntu].ctx_entry = true;
 
-       ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+       ctx_desc = &txq->base_ctx[ntu];
 
        IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
        txq->next_to_use = ntu;
@@ -460,7 +460,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, 
int napi_budget,
        struct netdev_queue *nq;
        bool dont_wake;
 
-       tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+       tx_desc = &tx_q->base_tx[ntc];
        tx_buf = &tx_q->tx_buf[ntc];
        ntc -= tx_q->desc_count;
 
@@ -509,7 +509,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, 
int napi_budget,
                        if (unlikely(!ntc)) {
                                ntc -= tx_q->desc_count;
                                tx_buf = tx_q->tx_buf;
-                               tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                               tx_desc = &tx_q->base_tx[0];
                        }
 
                        /* unmap any remaining paged data */
@@ -527,7 +527,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, 
int napi_budget,
                if (unlikely(!ntc)) {
                        ntc -= tx_q->desc_count;
                        tx_buf = tx_q->tx_buf;
-                       tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+                       tx_desc = &tx_q->base_tx[0];
                }
        } while (likely(budget));
 
@@ -880,7 +880,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue 
*rx_q,
        if (!cleaned_count)
                return false;
 
-       desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+       desc = &rx_q->single_buf[nta];
 
        do {
                dma_addr_t addr;
@@ -898,7 +898,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue 
*rx_q,
 
                nta++;
                if (unlikely(nta == rx_q->desc_count)) {
-                       desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+                       desc = &rx_q->single_buf[0];
                        nta = 0;
                }
 
@@ -998,7 +998,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, 
int budget)
                struct idpf_rx_buf *rx_buf;
 
                /* get the Rx desc from Rx queue based on 'next_to_clean' */
-               rx_desc = IDPF_RX_DESC(rx_q, ntc);
+               rx_desc = &rx_q->rx[ntc];
 
                /* status_error_ptype_len will always be zero for unused
                 * descriptors because it's cleared in cleanup, and overlaps
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6fd9128e61d8..40b8d8b17827 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -533,7 +533,7 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, 
u16 buf_id)
        u16 nta = bufq->next_to_alloc;
        dma_addr_t addr;
 
-       splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+       splitq_rx_desc = &bufq->split_buf[nta];
 
        if (bufq->rx_hsplit_en) {
                bq.pp = bufq->hdr_pp;
@@ -1560,7 +1560,7 @@ do {                                                      
        \
        if (unlikely(!(ntc))) {                                 \
                ntc -= (txq)->desc_count;                       \
                buf = (txq)->tx_buf;                            \
-               desc = IDPF_FLEX_TX_DESC(txq, 0);               \
+               desc = &(txq)->flex_tx[0];                      \
        } else {                                                \
                (buf)++;                                        \
                (desc)++;                                       \
@@ -1593,8 +1593,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, 
u16 end,
        s16 ntc = tx_q->next_to_clean;
        struct idpf_tx_buf *tx_buf;
 
-       tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
-       next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+       tx_desc = &tx_q->flex_tx[ntc];
+       next_pending_desc = &tx_q->flex_tx[end];
        tx_buf = &tx_q->tx_buf[ntc];
        ntc -= tx_q->desc_count;
 
@@ -1774,7 +1774,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue 
*complq, int budget,
        int i;
 
        complq_budget = vport->compln_clean_budget;
-       tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+       tx_desc = &complq->comp[ntc];
        ntc -= complq->desc_count;
 
        do {
@@ -1840,7 +1840,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue 
*complq, int budget,
                ntc++;
                if (unlikely(!ntc)) {
                        ntc -= complq->desc_count;
-                       tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+                       tx_desc = &complq->comp[0];
                        change_bit(__IDPF_Q_GEN_CHK, complq->flags);
                }
 
@@ -2107,7 +2107,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct 
sk_buff *skb,
                 * used one additional descriptor for a context
                 * descriptor. Reset that here.
                 */
-               tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+               tx_desc = &txq->flex_tx[idx];
                memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
                if (idx == 0)
                        idx = txq->desc_count;
@@ -2167,7 +2167,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
        data_len = skb->data_len;
        size = skb_headlen(skb);
 
-       tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+       tx_desc = &tx_q->flex_tx[i];
 
        dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
 
@@ -2241,7 +2241,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
                        i++;
 
                        if (i == tx_q->desc_count) {
-                               tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+                               tx_desc = &tx_q->flex_tx[0];
                                i = 0;
                                tx_q->compl_tag_cur_gen =
                                        IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2286,7 +2286,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
                i++;
 
                if (i == tx_q->desc_count) {
-                       tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+                       tx_desc = &tx_q->flex_tx[0];
                        i = 0;
                        tx_q->compl_tag_cur_gen = 
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
                }
@@ -2520,7 +2520,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
        txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
 
        /* grab the next descriptor */
-       desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+       desc = &txq->flex_ctx[i];
        txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
 
        return desc;
@@ -3020,7 +3020,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, 
int budget)
                u8 rxdid;
 
                /* get the Rx desc from Rx queue based on 'next_to_clean' */
-               desc = IDPF_RX_DESC(rxq, ntc);
+               desc = &rxq->rx[ntc];
                rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
 
                /* This memory barrier is needed to keep us from reading
@@ -3225,11 +3225,11 @@ static void idpf_rx_clean_refillq(struct idpf_queue 
*bufq,
        int cleaned = 0;
        u16 gen;
 
-       buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+       buf_desc = &bufq->split_buf[bufq_nta];
 
        /* make sure we stop at ring wrap in the unlikely case ring is full */
        while (likely(cleaned < refillq->desc_count)) {
-               u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+               u16 refill_desc = refillq->ring[ntc];
                bool failure;
 
                gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
@@ -3247,7 +3247,7 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
                }
 
                if (unlikely(++bufq_nta == bufq->desc_count)) {
-                       buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+                       buf_desc = &bufq->split_buf[0];
                        bufq_nta = 0;
                } else {
                        buf_desc++;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 5975c6d029d7..2584bd94363f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -112,24 +112,6 @@ do {                                                       
        \
 #define IDPF_RXD_EOF_SPLITQ            VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 #define IDPF_RXD_EOF_SINGLEQ           VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
 
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)       \
-       (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)        \
-       (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i)      \
-       (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i) \
-       (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)    \
-       (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
-       (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i)  \
-       (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
 #define IDPF_DESC_UNUSED(txq)     \
        ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) 
+ \
        (txq)->next_to_clean - (txq)->next_to_use - 1)
@@ -275,9 +257,6 @@ struct idpf_rx_extracted {
 #define IDPF_TX_MAX_DESC_DATA_ALIGNED \
        ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
 
-#define IDPF_RX_DESC(rxq, i)   \
-       (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
 #define idpf_rx_buf libie_rx_buffer
 
 #define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
@@ -586,7 +565,21 @@ struct idpf_queue {
                struct page_pool *pp;
                struct device *dev;
        };
-       void *desc_ring;
+       union {
+               union virtchnl2_rx_desc *rx;
+
+               struct virtchnl2_singleq_rx_buf_desc *single_buf;
+               struct virtchnl2_splitq_rx_buf_desc *split_buf;
+
+               struct idpf_base_tx_desc *base_tx;
+               struct idpf_base_tx_ctx_desc *base_ctx;
+               union idpf_tx_flex_desc *flex_tx;
+               struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+               struct idpf_splitq_tx_compl_desc *comp;
+
+               void *desc_ring;
+       };
 
        u32 hdr_truesize;
        u32 truesize;
-- 
2.43.0

_______________________________________________
Intel-wired-lan mailing list
Intel-wired-lan@osuosl.org
https://lists.osuosl.org/mailman/listinfo/intel-wired-lan

Reply via email to