Reading the {s,l}bq_prod_idx registers on a running device shows that the
adapter only uses buffers up to prod_idx & 0xfff0. The driver currently
uses fixed-size guard zones (16 for sbq, 32 for lbq; it is unclear why
they differ). After the previous patch, this approach no longer
guarantees prod_idx values aligned on multiples of 16. While it appears
that unaligned values can be written to prod_idx without ill effect on
device operation, it makes more sense to change qlge_refill_bq() to
refill up to a limit that matches the device's actual behavior.

Signed-off-by: Benjamin Poirier <bpoir...@suse.com>
---
 drivers/staging/qlge/qlge.h      |  8 ++++++++
 drivers/staging/qlge/qlge_main.c | 29 +++++++++++------------------
 2 files changed, 19 insertions(+), 18 deletions(-)
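
A standalone sketch of the arithmetic behind the new refill limit, for
reviewers (not part of the commit): the ring size and index values below
are made up for the example, and QLGE_BQ_ALIGN/QLGE_BQ_WRAP are
re-declared locally so the snippet builds on its own outside the kernel.

#include <stdio.h>

/* Example ring size; like the real queues it must be a power of two. */
#define QLGE_BQ_LEN 1024

/* The device rounds the producer index down to a multiple of 16. */
#define QLGE_BQ_ALIGN(index) ((index) & ~15)

#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))

int main(void)
{
        /* Hypothetical queue state: hw has returned buffers up to
         * next_to_clean, sw last refilled up to next_to_use.
         */
        unsigned int next_to_clean = 7;
        unsigned int next_to_use = 1000;

        /* Refill only up to the last x16 boundary below next_to_clean;
         * the device would not use buffers past that point anyway.
         */
        int refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(next_to_clean - 1) -
                                        next_to_use);

        /* Prints: refill 24 buffers, next_to_use 1000 -> 0 */
        printf("refill %d buffers, next_to_use %u -> %u\n", refill_count,
               next_to_use, QLGE_BQ_WRAP(next_to_use + refill_count));
        return 0;
}

When the full refill succeeds, the new next_to_use lands on a multiple
of 16 (here 1024, i.e. 0 after wrapping), so the prod_idx handed to the
adapter always corresponds to buffers it will actually consume.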

diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 7c48e333d29b..e5a352df8228 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -1423,6 +1423,9 @@ struct qlge_bq {
        __le64 *base_indirect;
        dma_addr_t base_indirect_dma;
        struct qlge_bq_desc *queue;
+       /* prod_idx is the index of the first buffer that may NOT be used by
+        * hw, i.e. one after the last. Advanced by sw.
+        */
        void __iomem *prod_idx_db_reg;
        /* next index where sw should refill a buffer for hw */
        u16 next_to_use;
@@ -1442,6 +1445,11 @@ struct qlge_bq {
                                          offsetof(struct rx_ring, lbq))); \
 })
 
+/* Experience shows that the device ignores the low 4 bits of the tail index.
+ * Refill up to a multiple of 16.
+ */
+#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
+
 #define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
 
 struct rx_ring {
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 83e75005688a..02ad0cdf4856 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -1114,22 +1114,12 @@ static void qlge_refill_bq(struct qlge_bq *bq)
        struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
        struct ql_adapter *qdev = rx_ring->qdev;
        struct qlge_bq_desc *bq_desc;
-       int free_count, refill_count;
-       unsigned int reserved_count;
+       int refill_count;
        int i;
 
-       if (bq->type == QLGE_SB)
-               reserved_count = 16;
-       else
-               reserved_count = 32;
-
-       free_count = bq->next_to_clean - bq->next_to_use;
-       if (free_count <= 0)
-               free_count += QLGE_BQ_LEN;
-
-       refill_count = free_count - reserved_count;
-       /* refill batch size */
-       if (refill_count < 16)
+       refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
+                                   bq->next_to_use);
+       if (!refill_count)
                return;
 
        i = bq->next_to_use;
@@ -1164,11 +1154,14 @@ static void qlge_refill_bq(struct qlge_bq *bq)
        i += QLGE_BQ_LEN;
 
        if (bq->next_to_use != i) {
-               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-                            "ring %u %s: updating prod idx = %d.\n",
-                            rx_ring->cq_id, bq_type_name[bq->type], i);
+               if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
+                       netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+                                    "ring %u %s: updating prod idx = %d.\n",
+                                    rx_ring->cq_id, bq_type_name[bq->type],
+                                    i);
+                       ql_write_db_reg(i, bq->prod_idx_db_reg);
+               }
                bq->next_to_use = i;
-               ql_write_db_reg(bq->next_to_use, bq->prod_idx_db_reg);
        }
 }
 
-- 
2.23.0
