From: Kevin Laatz <kevin.la...@intel.com>

Add a new API to query remaining descriptor ring capacity. This API is
useful, for example, when an application needs to enqueue a fragmented
packet and wants to ensure that all segments of the packet will be enqueued
together.
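
As an illustrative sketch only (not taken from this patch), an application
could use the new call to gate the enqueue of all segments of a multi-segment
packet, assuming "dev_id" is an already configured rawdev instance, "dst_iova"
is a caller-provided destination IOVA and "m" is the first mbuf of the
fragmented packet (error checking of the enqueue return value omitted for
brevity):

    struct rte_mbuf *seg;
    rte_iova_t dst = dst_iova;

    /* only begin enqueuing if every segment will fit in the ring */
    if (rte_ioat_burst_capacity(dev_id) >= m->nb_segs) {
            for (seg = m; seg != NULL; seg = seg->next) {
                    rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(seg),
                                    dst, rte_pktmbuf_data_len(seg),
                                    (uintptr_t)seg, 0);
                    dst += rte_pktmbuf_data_len(seg);
            }
            rte_ioat_perform_ops(dev_id);
    }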

Signed-off-by: Kevin Laatz <kevin.la...@intel.com>
Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
---
 drivers/raw/ioat/ioat_rawdev_test.c    | 138 ++++++++++++++++++++++++-
 drivers/raw/ioat/rte_idxd_rawdev_fns.h |  22 ++++
 drivers/raw/ioat/rte_ioat_rawdev_fns.h |  24 +++++
 3 files changed, 183 insertions(+), 1 deletion(-)

diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c
index 51eebe152f..5f75c6ff69 100644
--- a/drivers/raw/ioat/ioat_rawdev_test.c
+++ b/drivers/raw/ioat/ioat_rawdev_test.c
@@ -277,6 +277,138 @@ test_enqueue_fill(int dev_id)
        return 0;
 }
 
+static inline void
+reset_ring_ptrs(int dev_id)
+{
+       enum rte_ioat_dev_type *type =
+               (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+
+       if (*type == RTE_IDXD_DEV) {
+               struct rte_idxd_rawdev *idxd =
+                       (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+
+               idxd->batch_start = 0;
+               idxd->hdls_read = 0;
+       } else {
+               struct rte_ioat_rawdev *ioat =
+                       (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+
+               ioat->next_read = 0;
+               ioat->next_write = 0;
+       }
+}
+
+static int
+test_burst_capacity(int dev_id)
+{
+#define BURST_SIZE                     64
+       struct rte_mbuf *src, *dst;
+       unsigned int bursts_enqueued = 0;
+       unsigned int i;
+       unsigned int length = 1024;
+       uintptr_t completions[BURST_SIZE];
+
+       /* Ring pointer reset needed for checking test results */
+       reset_ring_ptrs(dev_id);
+
+       const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
+       const unsigned int expected_bursts = (ring_space)/BURST_SIZE;
+       src = rte_pktmbuf_alloc(pool);
+       dst = rte_pktmbuf_alloc(pool);
+
+       /* Enqueue bursts until they won't fit */
+       while (rte_ioat_burst_capacity(dev_id) >= BURST_SIZE) {
+               for (i = 0; i < BURST_SIZE; i++) {
+
+                       if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+                                       rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+                               PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+                               return -1;
+                       }
+               }
+               bursts_enqueued++;
+               if ((bursts_enqueued & 1) == 1) /* hit doorbell every second burst */
+                       rte_ioat_perform_ops(dev_id);
+       }
+       rte_ioat_perform_ops(dev_id);
+
+       /* check the number of bursts enqueued was as expected */
+       if (bursts_enqueued != expected_bursts) {
+               PRINT_ERR("Capacity test failed, enqueued %u not %u bursts\n",
+                               bursts_enqueued, expected_bursts);
+               return -1;
+       }
+
+       /* check the space is now as expected */
+       if (rte_ioat_burst_capacity(dev_id) != ring_space - bursts_enqueued * BURST_SIZE) {
+               printf("Capacity error. Expected %u free slots, got %u\n",
+                               ring_space - bursts_enqueued * BURST_SIZE,
+                               rte_ioat_burst_capacity(dev_id));
+               return -1;
+       }
+
+       /* do cleanup before next tests */
+       usleep(100);
+       for (i = 0; i < bursts_enqueued; i++) {
+               if (rte_ioat_completed_ops(dev_id, BURST_SIZE, completions,
+                               completions) != BURST_SIZE) {
+                       PRINT_ERR("error with completions\n");
+                       return -1;
+               }
+       }
+
+       /* Since we reset the ring pointers before the previous test, and we enqueued
+        * the max amount of bursts, enqueuing one more burst will enable us to test
+        * the wrap around handling in rte_ioat_burst_capacity().
+        */
+
+       /* Verify the descriptor ring is empty before we test */
+       if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+               PRINT_ERR("Error, ring should be empty\n");
+               return -1;
+       }
+
+       /* Enqueue one burst of mbufs & verify the expected space is taken */
+       for (i = 0; i < BURST_SIZE; i++) {
+               if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+                               rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+                       PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+                       return -1;
+               }
+       }
+
+       /* Perform the copy before checking the capacity again so that the write
+        * pointer in the descriptor ring is wrapped/masked
+        */
+       rte_ioat_perform_ops(dev_id);
+       usleep(100);
+
+       /* This check will confirm both that the correct amount of space is taken
+        * in the ring, and that the ring wrap around handling is correct.
+        */
+       if (rte_ioat_burst_capacity(dev_id) != ring_space - BURST_SIZE) {
+               PRINT_ERR("Error, space available not as expected\n");
+               return -1;
+       }
+
+       /* Now we gather completions to update the read pointer */
+       if (rte_ioat_completed_ops(dev_id, BURST_SIZE, completions, completions) != BURST_SIZE) {
+               PRINT_ERR("Error with completions\n");
+               return -1;
+       }
+
+       /* After gathering the completions, the descriptor ring should be empty */
+       if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+               PRINT_ERR("Error, space available not as expected\n");
+               return -1;
+       }
+
+       rte_pktmbuf_free(src);
+       rte_pktmbuf_free(dst);
+
+       return 0;
+}
+
 int
 ioat_rawdev_test(uint16_t dev_id)
 {
@@ -321,7 +453,7 @@ ioat_rawdev_test(uint16_t dev_id)
        }
 
        pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
-                       256, /* n == num elements */
+                       p.ring_size * 2, /* n == num elements */
                        32,  /* cache size */
                        0,   /* priv size */
                        2048, /* data room size */
@@ -385,6 +517,10 @@ ioat_rawdev_test(uint16_t dev_id)
        }
        printf("\n");
 
+       printf("Running Burst Capacity Test\n");
+       if (test_burst_capacity(dev_id) != 0)
+               goto err;
+
        rte_rawdev_stop(dev_id);
        if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
                PRINT_ERR("Error resetting xstat values\n");
diff --git a/drivers/raw/ioat/rte_idxd_rawdev_fns.h b/drivers/raw/ioat/rte_idxd_rawdev_fns.h
index 4c49d2b84a..41f0ad6e99 100644
--- a/drivers/raw/ioat/rte_idxd_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_idxd_rawdev_fns.h
@@ -106,6 +106,28 @@ struct rte_idxd_rawdev {
        struct rte_idxd_user_hdl *hdl_ring;
 };
 
+static __rte_always_inline uint16_t
+__idxd_burst_capacity(int dev_id)
+{
+       struct rte_idxd_rawdev *idxd =
+                       (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+       uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+       uint16_t used_space;
+
+       /* Check for space in the batch ring */
+       if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+                       idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+               return 0;
+
+       /* for descriptors, check for wrap-around on write but not read */
+       if (idxd->hdls_read > write_idx)
+               write_idx += idxd->desc_ring_mask + 1;
+       used_space = write_idx - idxd->hdls_read;
+
+       /* Return amount of free space in the descriptor ring */
+       return idxd->desc_ring_mask - used_space;
+}
+
 static __rte_always_inline rte_iova_t
 __desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
 {
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
index 598852b1fa..92ccdd03b9 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -100,6 +100,19 @@ struct rte_ioat_rawdev {
 #define RTE_IOAT_CHANSTS_HALTED                        0x3
 #define RTE_IOAT_CHANSTS_ARMED                 0x4
 
+static __rte_always_inline uint16_t
+__ioat_burst_capacity(int dev_id)
+{
+       struct rte_ioat_rawdev *ioat =
+                       (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+       unsigned short size = ioat->ring_size - 1;
+       unsigned short read = ioat->next_read;
+       unsigned short write = ioat->next_write;
+       unsigned short space = size - (write - read);
+
+       return space;
+}
+
 static __rte_always_inline int
 __ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
@@ -260,6 +273,17 @@ __ioat_completed_ops(int dev_id, uint8_t max_copies,
        return count;
 }
 
+static inline uint16_t
+rte_ioat_burst_capacity(int dev_id)
+{
+       enum rte_ioat_dev_type *type =
+               (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_burst_capacity(dev_id);
+       else
+               return __ioat_burst_capacity(dev_id);
+}
+
 static inline int
 rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
                unsigned int len, uintptr_t dst_hdl)
-- 
2.30.2
